from fixtures import *  # noqa: F401,F403
from fixtures import TEST_NETWORK
from ephemeral_port_reserve import reserve  # type: ignore
from pyln.client import RpcError, Millisatoshi
import pyln.proto.wire as wire
from utils import (
    only_one, wait_for, sync_blockheight, TIMEOUT,
    expected_peer_features, expected_node_features,
    expected_channel_features,
    check_coin_moves, first_channel_id, account_balance, basic_fee,
    scriptpubkey_addr, default_ln_port,
    EXPERIMENTAL_FEATURES, mine_funding_to_announce, first_scid
)
from pyln.testing.utils import SLOW_MACHINE, VALGRIND, EXPERIMENTAL_DUAL_FUND, FUNDAMOUNT

import os
import pytest
import random
import re
import time
import unittest
import websocket


def test_connect_basic(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    # These should be in openingd.
    assert l1.rpc.getpeer(l2.info['id'])['connected']
    assert l2.rpc.getpeer(l1.info['id'])['connected']
    assert len(l1.rpc.getpeer(l2.info['id'])['channels']) == 0
    assert len(l2.rpc.getpeer(l1.info['id'])['channels']) == 0

    # Reconnect should be a noop
    ret = l1.rpc.connect(l2.info['id'], 'localhost', port=l2.port)
    assert ret['id'] == l2.info['id']
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l2.port}

    ret = l2.rpc.connect(l1.info['id'], host='localhost', port=l1.port)
    assert ret['id'] == l1.info['id']
    # FIXME: This gives a bogus address (since they connected to us): better to give none!
    assert 'address' in ret

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1

    # Should get reasonable error if unknown addr for peer.
    with pytest.raises(RpcError, match=r'Unable to connect, no address known'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e')

    # Should get reasonable error if connection refused.
    with pytest.raises(RpcError, match=r'Connection establishment: Connection refused'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', 1)

    # Should get reasonable error if wrong key for peer.
    with pytest.raises(RpcError, match=r'Cryptographic handshake: peer closed connection \(wrong key\?\)'):
        l1.rpc.connect('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'localhost', l2.port)


@pytest.mark.developer("needs DEVELOPER=1 for fast gossip and --dev-allow-localhost for local remote_addr")
def test_remote_addr(node_factory, bitcoind):
    """Check that address discovery (BOLT1 #917) via init `remote_addr` works as designed.

    A `node_announcement` update must only be sent out when at least two
    peers that we have a channel with report the same `remote_addr`.

    We perform the logic tests on l2; setup:
        l1 --> [l2] <-- l3
    """
    # don't announce anything per se
    opts = {'may_reconnect': True,
            'dev-allow-localhost': None,
            'dev-no-reconnect': None}
    l1, l2, l3 = node_factory.get_nodes(3, opts)

    # Disable announcing local autobind addresses with dev-allow-localhost.
    # We need l2's 'bind-addr' opt set to the (generated) value of 'addr'.
    # So we stop, set the 'bind-addr' option, delete 'addr' and restart first.
    l2.stop()
    l2.daemon.opts['bind-addr'] = l2.daemon.opts['addr']
    del l2.daemon.opts['addr']
    l2.start()
    assert len(l2.rpc.getinfo()['address']) == 0

    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    logmsg = l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")

    # check 'listpeers' contains the 'remote_addr' as logged
    assert logmsg.endswith(l2.rpc.listpeers()['peers'][0]['remote_addr'])

    # Fund the first channel so the initial node_announcement is sent,
    # and also check no addresses have been announced yet.
    l1.fundchannel(l2)
    bitcoind.generate_block(5)
    l1.daemon.wait_for_log(f"Received node_announcement for node {l2.info['id']}")
    assert(len(l1.rpc.listnodes(l2.info['id'])['nodes'][0]['addresses']) == 0)
    assert len(l2.rpc.getinfo()['address']) == 0

    def_port = default_ln_port(l2.info["network"])

    # When we restart l1 with a channel and reconnect, the node_announcement
    # update must not yet be sent, as we need the same `remote_addr` confirmed
    # from another peer we have a channel with.
    # Note: In this state l2 stores remote_addr as reported by l1.
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address: 127.0.0.1:{}".format(def_port))
    l1.restart()
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")

    # Now l1 sees l2 but without announced addresses.
    assert(len(l1.rpc.listnodes(l2.info['id'])['nodes'][0]['addresses']) == 0)
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address: 127.0.0.1:{}".format(def_port))
    assert len(l2.rpc.getinfo()['address']) == 0

    # Connect the second node. This will not yet trigger a `node_announcement`
    # update, as we again do not have a channel at the time we connected.
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")

    # Fund the channel and check we didn't already send an update earlier.
    l2.fundchannel(l3, wait_for_active=True)
    bitcoind.generate_block(5)
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address: 127.0.0.1:{}".format(def_port))
    assert len(l2.rpc.getinfo()['address']) == 0

    # Restart, reconnect and re-check for the updated node_announcement. This
    # time l2 sees that two different peers with channels reported the same
    # `remote_addr`.
    l3.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")
    l2.daemon.wait_for_log("Update our node_announcement for discovered address: 127.0.0.1:{}".format(def_port))
    l1.daemon.wait_for_log(f"Received node_announcement for node {l2.info['id']}")

    # check l1 sees the updated node announcement via CLI listnodes
    address = l1.rpc.listnodes(l2.info['id'])['nodes'][0]['addresses'][0]
    assert address['type'] == "ipv4"
    assert address['address'] == "127.0.0.1"
    assert address['port'] == def_port

    # also check l2 returns the announced address (and port) via CLI getinfo
    getinfo = l2.rpc.getinfo()
    assert len(getinfo['address']) == 1
    assert getinfo['address'][0]['type'] == 'ipv4'
    assert getinfo['address'][0]['address'] == '127.0.0.1'
    assert getinfo['address'][0]['port'] == def_port
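

# A minimal sketch of the discovery rule exercised above, for illustration
# only: this helper is hypothetical (not used by lightningd or these tests).
# An address counts as "discovered" once at least two peers we have a channel
# with report the same remote_addr for us.
def _illustrate_address_discovery(remote_addrs_from_channel_peers):
    """Return the address to announce, or None if not yet confirmed.

    `remote_addrs_from_channel_peers` is assumed to be a list of
    'ip:port' strings, one per connected peer we have a channel with.
    """
    from collections import Counter
    counts = Counter(remote_addrs_from_channel_peers)
    addr, seen = counts.most_common(1)[0] if counts else (None, 0)
    return addr if seen >= 2 else None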


@pytest.mark.developer("needs DEVELOPER=1 for fast gossip and --dev-allow-localhost for local remote_addr")
def test_remote_addr_disabled(node_factory, bitcoind):
    """Simply tests that IP address discovery announcements can be turned off.

    We perform the logic tests on l2; setup:
        l1 --> [l2] <-- l3
    """
    opts = {'dev-allow-localhost': None,
            'disable-ip-discovery': None,
            'may_reconnect': True,
            'dev-no-reconnect': None}
    l1, l2, l3 = node_factory.get_nodes(3, opts=[opts, opts, opts])

    # l1->l2
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")
    l1.fundchannel(l2)
    bitcoind.generate_block(5)
    l1.daemon.wait_for_log(f"Received node_announcement for node {l2.info['id']}")

    # l2->l3
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Peer says it sees our address as: 127.0.0.1:[0-9]{5}")
    l2.fundchannel(l3)
    bitcoind.generate_block(5)

    # restart both and wait for channels to be ready
    l1.restart()
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.daemon.wait_for_log("Already have funding locked in")
    l3.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.daemon.wait_for_log("Already have funding locked in")

    # If IP discovery had been enabled, we would have sent an updated
    # node_announcement by now. Check that we didn't...
    assert not l2.daemon.is_in_log("Update our node_announcement for discovered address")


def test_connect_standard_addr(node_factory):
    """Test standard node@host:port address
    """
    l1, l2, l3 = node_factory.get_nodes(3)

    # node@host
    ret = l1.rpc.connect("{}@{}".format(l2.info['id'], 'localhost'), port=l2.port)
    assert ret['id'] == l2.info['id']
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l2.port}

    # node@host:port
    ret = l1.rpc.connect("{}@localhost:{}".format(l3.info['id'], l3.port))
    assert ret['id'] == l3.info['id']

    # node@[ipv6]:port --- not supported by our CI
    # ret = l1.rpc.connect("{}@[::1]:{}".format(l3.info['id'], l3.port))
    # assert ret['id'] == l3.info['id']
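

# For illustration only: a rough sketch of how a "node@host:port" string can
# be split client-side. This helper is hypothetical, not the parser lightningd
# actually uses, and it ignores the bracketed-IPv6 case commented out above.
def _split_standard_addr(addr, default_port=9735):
    # "node_id" alone, "node_id@host", or "node_id@host:port"
    node_id, _, hostport = addr.partition('@')
    if not hostport:
        return node_id, None, default_port
    host, sep, port = hostport.rpartition(':')
    if not sep:
        return node_id, hostport, default_port
    return node_id, host, int(port)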


def test_reconnect_channel_peers(node_factory, executor):
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)
    l2.restart()

    # Should reconnect.
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
    # Connect command should succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Stop l2 and wait for l1 to notice.
    l2.stop()
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])

    # Now should fail.
    with pytest.raises(RpcError, match=r'(Connection refused|Bad file descriptor)'):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Wait for exponential backoff to give us a 2 second window.
    l1.daemon.wait_for_log('Will try reconnect in 2 seconds')

    # It should now succeed when it restarts.
    l2.start()

    # Multiples should be fine!
    fut1 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut2 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut3 = executor.submit(l1.rpc.connect, l2.info['id'], 'localhost', l2.port)
    fut1.result(10)
    fut2.result(10)
    fut3.result(10)
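

# Note on the backoff above: lightningd retries failed reconnections with an
# exponentially increasing delay (roughly 1s, 2s, 4s, ... up to a cap), which
# is why the test waits for the "Will try reconnect in 2 seconds" log line to
# know it has a comfortable window before restarting l2. The exact schedule
# and cap are an implementation detail of connectd, not asserted here.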


def test_connection_moved(node_factory, executor):
    slow_start = os.path.join(os.getcwd(), 'tests/plugins/slow_start.py')
    options = {'may_reconnect': True, 'plugin': slow_start}
    l1, l2 = node_factory.get_nodes(2, opts=options)

    # Set up the plugin to wait for a connection
    executor.submit(l1.rpc.waitconn)
    log = l1.daemon.wait_for_log('listening for connections')
    match = re.search(r'on port (\d*)', log)
    assert match and len(match.groups()) == 1
    hang_port = int(match.groups()[0])

    # Attempt connection
    fut_hang = executor.submit(l1.rpc.connect, l2.info['id'],
                               'localhost', hang_port)
    l1.daemon.wait_for_log('connection from')

    # Provide correct connection details
    ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l2.port}

    # If we failed to update the connection, this call will error
    fut_hang.result(TIMEOUT)


def test_balance(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=True)
    p1 = only_one(l1.rpc.getpeer(peer_id=l2.info['id'], level='info')['channels'])
    p2 = only_one(l2.rpc.getpeer(l1.info['id'], 'info')['channels'])
    assert p1['to_us_msat'] == 10**6 * 1000
    assert p1['total_msat'] == 10**6 * 1000
    assert p2['to_us_msat'] == 0
    assert p2['total_msat'] == 10**6 * 1000


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_bad_opening(node_factory):
    # l1 asks for a too-long locktime
    l1 = node_factory.get_node(options={'watchtime-blocks': 100})
    l2 = node_factory.get_node(options={'max-locktime-blocks': 99})
    ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    assert ret['id'] == l2.info['id']

    l1.daemon.wait_for_log('Handed peer, entering loop')
    l2.daemon.wait_for_log('Handed peer, entering loop')

    l1.fundwallet(10**6 + 1000000)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 10**6)

    l2.daemon.wait_for_log('to_self_delay 100 larger than 99')
@pytest.mark.developer("gossip without DEVELOPER=1 is slow, need dev-no-reconnect")
@unittest.skipIf(TEST_NETWORK != 'regtest', "Fee computation and limits are network specific")
@pytest.mark.slow_test
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_opening_tiny_channel(node_factory):
# Test custom min-capacity-sat parameters
#
# [l1]-----> [l2] (~6000) - technical minimal value that wont be rejected
# \
# o---> [l3] (10000) - the current default
# \
# o-> [l4] (20000) - a node with a higher minimal value
#
# For each:
# 1. Try to establish channel with capacity 1sat smaller than min_capacity_sat
# 2. Try to establish channel with capacity exact min_capacity_sat
#
# BOLT2
# The receiving node MAY fail the channel if:
# - funding_satoshis is too small
# - it considers `feerate_per_kw` too small for timely processing or unreasonably large.
#
dustlimit = 546
reserves = 2 * dustlimit
min_commit_tx_fees = basic_fee(7500)
overhead = reserves + min_commit_tx_fees
if EXPERIMENTAL_FEATURES or EXPERIMENTAL_DUAL_FUND:
# Gotta fund those anchors too!
overhead += 660
l2_min_capacity = 1 # just enough to get past capacity filter
l3_min_capacity = 10000 # the current default
l4_min_capacity = 20000 # a server with more than default minimum
opts = [{'min-capacity-sat': 0, 'dev-no-reconnect': None},
{'min-capacity-sat': l2_min_capacity, 'dev-no-reconnect': None},
{'min-capacity-sat': l3_min_capacity, 'dev-no-reconnect': None},
{'min-capacity-sat': l4_min_capacity, 'dev-no-reconnect': None}]
l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
with pytest.raises(RpcError, match=r'They sent [error|warning].*channel capacity is .*, which is below .*sat'):
l1.fundchannel(l2, l2_min_capacity + overhead - 1)
wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, l2_min_capacity + overhead)
with pytest.raises(RpcError, match=r'They sent [error|warning].*channel capacity is .*, which is below .*sat'):
l1.fundchannel(l3, l3_min_capacity + overhead - 1)
wait_for(lambda: l1.rpc.listpeers(l3.info['id'])['peers'] == [])
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.fundchannel(l3, l3_min_capacity + overhead)
with pytest.raises(RpcError, match=r'They sent [error|warning].*channel capacity is .*, which is below .*sat'):
l1.fundchannel(l4, l4_min_capacity + overhead - 1)
wait_for(lambda: l1.rpc.listpeers(l4.info['id'])['peers'] == [])
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
l1.fundchannel(l4, l4_min_capacity + overhead)
# Note that this check applies locally too, so you can't open it if
# you would reject it.
l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r"channel capacity is .*, which is below .*sat"):
l3.fundchannel(l2, l3_min_capacity + overhead - 1)
wait_for(lambda: l3.rpc.listpeers(l2.info['id'])['peers'] == [])
l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
l3.fundchannel(l2, l3_min_capacity + overhead)
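

# Rough worked example of the `overhead` computed above (treating the exact
# fee figure as an assumption about utils.basic_fee rather than a spec value):
# with dustlimit = 546 sat, the two channel reserves cost 2 * 546 = 1092 sat,
# and the minimum commitment-tx fee at 7500 perkw adds a few thousand sat
# more, so even l2's nominal min-capacity-sat of 1 ends up needing roughly
# 6000 sat of real capacity -- the "~6000" in the diagram at the top of the
# test. Anchor-output builds add a further 2 * 330 = 660 sat.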


def test_second_channel(node_factory):
    l1, l2, l3 = node_factory.get_nodes(3)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l1.fundchannel(l2, 10**6)
    l1.fundchannel(l3, 10**6)


def test_channel_abandon(node_factory, bitcoind):
    """Our open tx isn't mined, we doublespend it away"""
    l1, l2 = node_factory.get_nodes(2)

    SATS = 10**6

    # Add some for fees
    l1.fundwallet(SATS + 10000)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], SATS, feerate='1875perkw')

    opening_utxo = only_one([o for o in l1.rpc.listfunds()['outputs'] if o['reserved']])
    psbt = l1.rpc.utxopsbt(0, "253perkw", 0, [opening_utxo['txid'] + ':' + str(opening_utxo['output'])], reserve=0, reservedok=True)['psbt']

    # We expect a reservation for 2016 blocks; unreserve it.
    reservations = only_one(l1.rpc.unreserveinputs(psbt, reserve=2015)['reservations'])
    assert reservations['reserved']
    assert reservations['reserved_to_block'] == bitcoind.rpc.getblockchaininfo()['blocks'] + 1

    assert only_one(l1.rpc.unreserveinputs(psbt, reserve=1)['reservations'])['reserved'] is False

    # Now it's unreserved, we can doublespend it (as long as we exceed
    # the previous fee to RBF!).
    withdraw = l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], "all")

    assert bitcoind.rpc.decoderawtransaction(withdraw['tx'])['vout'][0]['value'] > SATS / 10**8

    bitcoind.generate_block(1, wait_for_mempool=withdraw['txid'])

    # FIXME: lightningd should notice the channel will now never open!
    print(l1.rpc.listpeers())
    assert (only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state']
            == 'CHANNELD_AWAITING_LOCKIN')
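

# The tests below use lightningd's dev-disconnect hooks: each entry names a
# wire message and a prefix describing when to kill the connection. As a
# rough guide (see common/dev_disconnect.c in the source for the authoritative
# list): '-' disconnects just before sending the named message, '+' just
# after sending it, and '0' swallows the message without sending it
# ("blackhole"). The '=' prefix used in some dual-funding cases is a further
# variant of the same mechanism.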


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect(node_factory):
    # These should all make us fail
    disconnects = ['-WIRE_INIT',
                   '+WIRE_INIT']
    l1 = node_factory.get_node(disconnect=disconnects)
    l2 = node_factory.get_node()

    with pytest.raises(RpcError):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError):
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Should have a connect failure for each disconnect.
    for d in disconnects:
        l1.daemon.wait_for_log('{}-.*Failed connected out'
                               .format(l2.info['id']))

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect_opener(node_factory):
    # Now error on opener side during channel open.
    disconnects = ['-WIRE_OPEN_CHANNEL',
                   '+WIRE_OPEN_CHANNEL',
                   '-WIRE_FUNDING_CREATED']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['-WIRE_OPEN_CHANNEL2',
                       '+WIRE_OPEN_CHANNEL2',
                       '-WIRE_TX_ADD_INPUT',
                       '+WIRE_TX_ADD_INPUT',
                       '-WIRE_TX_ADD_OUTPUT',
                       '+WIRE_TX_ADD_OUTPUT',
                       '-WIRE_TX_COMPLETE',
                       '+WIRE_TX_COMPLETE']

    l1 = node_factory.get_node(disconnect=disconnects)
    l2 = node_factory.get_node(may_reconnect=EXPERIMENTAL_DUAL_FUND)
    l1.fundwallet(2000000)

    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 25000)
        # First the peer vanishes, but later it just disconnects
        wait_for(lambda: all([p['connected'] is False for p in l1.rpc.listpeers()['peers']]))

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()['peers']) == 1
    assert len(l2.rpc.listpeers()['peers']) == 1


def test_remote_disconnect(node_factory):
    l1, l2 = node_factory.get_nodes(2)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: l2.rpc.listpeers()['peers'] != [])

    l2.rpc.disconnect(l1.info['id'])

    # l1 should notice!
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect_fundee(node_factory):
    # Now error on fundee side during channel open.
    disconnects = ['-WIRE_ACCEPT_CHANNEL',
                   '+WIRE_ACCEPT_CHANNEL']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['-WIRE_ACCEPT_CHANNEL2',
                       '+WIRE_ACCEPT_CHANNEL2',
                       '-WIRE_TX_COMPLETE',
                       '+WIRE_TX_COMPLETE']

    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)
    l1.fundwallet(2000000)

    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 25000)
        # First the peer vanishes, but later it just disconnects
        wait_for(lambda: all([p['connected'] is False for p in l1.rpc.listpeers()['peers']]))

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()) == 1
    assert len(l2.rpc.listpeers()) == 1


@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.developer
@pytest.mark.openchannel('v2')
def test_disconnect_fundee_v2(node_factory):
    # Now error on fundee side during channel open, with them funding
    disconnects = ['-WIRE_ACCEPT_CHANNEL2',
                   '+WIRE_ACCEPT_CHANNEL2',
                   '-WIRE_TX_ADD_INPUT',
                   '+WIRE_TX_ADD_INPUT',
                   '-WIRE_TX_ADD_OUTPUT',
                   '+WIRE_TX_ADD_OUTPUT',
                   '-WIRE_TX_COMPLETE',
                   '+WIRE_TX_COMPLETE']

    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects,
                               options={'funder-policy': 'match',
                                        'funder-policy-mod': 100,
                                        'funder-fuzz-percent': 0,
                                        'funder-lease-requests-only': False})
    l1.fundwallet(2000000)
    l2.fundwallet(2000000)

    for d in disconnects:
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        with pytest.raises(RpcError):
            l1.rpc.fundchannel(l2.info['id'], 25000)
        assert l1.rpc.getpeer(l2.info['id']) is None

    # This one will succeed.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # Should still only have one peer!
    assert len(l1.rpc.listpeers()['peers']) == 1
    assert len(l2.rpc.listpeers()['peers']) == 1


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_disconnect_half_signed(node_factory):
    # Now, these are the corner cases. Fundee sends funding_signed,
    # but opener doesn't receive it.
    disconnects = ['-WIRE_FUNDING_SIGNED']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['-WIRE_COMMITMENT_SIGNED']

    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)
    l1.fundwallet(2000000)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 25000)

    # Peer remembers, opener doesn't.
    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])
    assert len(only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['channels']) == 1
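

# Why the asymmetry above: the fundee (l2) committed to the channel when it
# sent funding_signed (or commitment_signed under dual funding), so it must
# remember it across the disconnect; the opener (l1) never received that
# message, cannot broadcast the funding tx, and can therefore safely forget
# the attempt. This is the classic "half-signed" corner case of the funding
# flow rather than anything specific to these tests.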


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_signed(node_factory):
    # This will fail *after* both sides consider channel opening.
    disconnects = ['+WIRE_FUNDING_SIGNED']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['+WIRE_COMMITMENT_SIGNED']

    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.fundwallet(2000000)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 25000)

    # They haven't forgotten each other.
    assert l1.rpc.getpeer(l2.info['id'])['id'] == l2.info['id']
    assert l2.rpc.getpeer(l1.info['id'])['id'] == l1.info['id']

    # Technically, this is async to fundchannel (and could reconnect first)
    if EXPERIMENTAL_DUAL_FUND:
        l1.daemon.wait_for_logs(['sendrawtx exit 0',
                                 'Peer has reconnected, state DUALOPEND_OPEN_INIT'])
    else:
        l1.daemon.wait_for_logs(['sendrawtx exit 0',
                                 'Peer has reconnected, state CHANNELD_AWAITING_LOCKIN'])

    l1.bitcoin.generate_block(6)
    l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
    l2.daemon.wait_for_log(' to CHANNELD_NORMAL')


@pytest.mark.skip('needs blackhole support')
@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_openingd(node_factory):
    # Openingd thinks we're still opening; opener reconnects..
    disconnects = ['0WIRE_ACCEPT_CHANNEL']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['0WIRE_ACCEPT_CHANNEL2']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(2000000)

    # l2 closes on l1, l1 forgets.
    with pytest.raises(RpcError):
        l1.rpc.fundchannel(l2.info['id'], 25000)
    assert l1.rpc.getpeer(l2.info['id']) is None

    # Reconnect.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # We should get a message about reconnecting.
    l2.daemon.wait_for_log('Killing opening daemon: Reconnected')
    l2.daemon.wait_for_log('Handed peer, entering loop')

    # Should work fine.
    l1.rpc.fundchannel(l2.info['id'], 25000)
    l1.daemon.wait_for_log('sendrawtx exit 0')

    l1.bitcoin.generate_block(3)

    # Just to be sure, the second openingd hands over to channeld.
    # This log line is about channeld being started.
    l2.daemon.wait_for_log(r'channeld-chan#[0-9]: pid [0-9]+, msgfd [0-9]+')


@pytest.mark.skip('needs blackhole support')
@pytest.mark.developer
def test_reconnect_gossiping(node_factory):
    # connectd thinks we're still gossiping; peer reconnects.
    disconnects = ['0INVALID 33333']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Make sure l2 knows about l1
    wait_for(lambda: l2.rpc.listpeers(l1.info['id'])['peers'] != [])

    l2.rpc.sendcustommsg(l1.info['id'], bytes([0x82, 0x35]).hex())
    wait_for(lambda: l1.rpc.listpeers(l2.info['id'])['peers'] == [])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('processing now old peer gone')
@pytest.mark.developer("needs dev-disconnect")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_no_update(node_factory, executor, bitcoind):
"""Test that channel_ready is retransmitted on reconnect if new channel
This tests if the `channel_ready` is sent if we receive a
`channel_reestablish` message with `next_commitment_number` == 1
and our `next_commitment_number` == 1.
This test makes extensive use of disconnects followed by automatic
reconnects. See comments for details.
"""
disconnects = ["-WIRE_CHANNEL_READY", "-WIRE_SHUTDOWN"]
# Allow bad gossip because it might receive WIRE_CHANNEL_UPDATE before
# announcement of the disconnection
l1 = node_factory.get_node(may_reconnect=True, allow_bad_gossip=True)
l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
# For channeld reconnection
l1.rpc.connect(l2.info["id"], "localhost", l2.port)
# LightningNode.fundchannel will fund the channel and generate a
# block. The block triggers the channel_ready message, which
# causes a disconnect. The retransmission is then caused by the
# automatic retry.
fundchannel_exec = executor.submit(l1.fundchannel, l2, 10**6, False)
if l1.config('experimental-dual-fund'):
l1.daemon.wait_for_log(r"dualopend.* Retransmitting channel_ready for channel")
else:
l1.daemon.wait_for_log(r"channeld.* Retransmitting channel_ready for channel")
sync_blockheight(bitcoind, [l1, l2])
fundchannel_exec.result()
l1.stop()
# For closingd reconnection
l1.daemon.start()
# Close will trigger the -WIRE_SHUTDOWN and we then wait for the
# automatic reconnection to trigger the retransmission.
l1.rpc.close(l2.info['id'], 0)
l2.daemon.wait_for_log(r"channeld.* Retransmitting channel_ready for channel")
l1.daemon.wait_for_log(r"CLOSINGD_COMPLETE")


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_normal(node_factory):
    # Should reconnect fine even if locked message gets lost.
    disconnects = ['-WIRE_CHANNEL_READY',
                   '+WIRE_CHANNEL_READY']
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fundchannel(l2, 10**6)


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_sender_add1(node_factory):
    # Fail after add is OK, will cause payment failure though.
    # Make sure it doesn't send commit before it sees disconnect though.
    disconnects = ['-WIRE_UPDATE_ADD_HTLC',
                   '+WIRE_UPDATE_ADD_HTLC']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True,
                               options={'commit-time': 2000},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_reconnect_sender_add1', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('test_reconnect_sender_add1')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': first_scid(l1, l2)}]

    for i in range(0, len(disconnects)):
        with pytest.raises(RpcError):
            l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
            l1.rpc.waitsendpay(rhash)

        # Wait for reconnection.
        l1.daemon.wait_for_log('Already have funding locked in')

    # This will send commit, so will reconnect as required.
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_sender_add(node_factory):
    disconnects = ['-WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['=WIRE_COMMITMENT_SIGNED'] + disconnects

    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True,
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'testpayment', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': first_scid(l1, l2)}]

    # This will send commit, so will reconnect as required.
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
    # Should have printed this for every reconnect.
    for i in range(0, len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_reconnect_receiver_add(node_factory):
    disconnects = ['-WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    if EXPERIMENTAL_DUAL_FUND:
        disconnects = ['=WIRE_COMMITMENT_SIGNED'] + disconnects

    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(may_reconnect=True, feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'testpayment2', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': first_scid(l1, l2)}]
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
    for i in range(len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'


@pytest.mark.developer
def test_reconnect_receiver_fulfill(node_factory):
    # Ordering matters: after +WIRE_UPDATE_FULFILL_HTLC, channeld
    # will continue and try to send WIRE_COMMITMENT_SIGNED: if
    # that's the next failure, it will do two in one run.
    disconnects = ['+WIRE_UPDATE_FULFILL_HTLC',
                   '-WIRE_UPDATE_FULFILL_HTLC',
                   '-WIRE_COMMITMENT_SIGNED',
                   '+WIRE_COMMITMENT_SIGNED',
                   '-WIRE_REVOKE_AND_ACK',
                   '+WIRE_REVOKE_AND_ACK']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'testpayment2', 'desc')
    rhash = inv['payment_hash']
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'unpaid'

    route = [{'amount_msat': amt, 'id': l2.info['id'], 'delay': 5, 'channel': first_scid(l1, l2)}]
    l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
    for i in range(len(disconnects)):
        l1.daemon.wait_for_log('Already have funding locked in')
    assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'


@pytest.mark.developer
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_shutdown_reconnect(node_factory):
    disconnects = ['-WIRE_SHUTDOWN',
                   '+WIRE_SHUTDOWN']
    l1 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chan, _ = l1.fundchannel(l2, 10**6)
    l1.pay(l2, 200000000)

    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 0

    # This should wait until we're closed.
    l1.rpc.close(chan)

    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')

    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1


@pytest.mark.developer
def test_reconnect_remote_sends_no_sigs(node_factory):
    """We re-announce, even when the remote node doesn't send its announcement_signatures on reconnect.
    """
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True, opts={'may_reconnect': True})

    # When l1 restarts (with rescan=1), make it think it hasn't
    # reached announce_depth, so it won't re-send announcement_signatures
    def no_blocks_above(req):
        if req['params'][0] > 107:
            return {"result": None,
                    "error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}
        else:
            return {'result': l1.bitcoin.rpc.getblockhash(req['params'][0]),
                    "error": None, 'id': req['id']}

    l1.daemon.rpcproxy.mock_rpc('getblockhash', no_blocks_above)
    l1.restart()

    # l2 will now use the (REMOTE's) announcement_signatures it has stored
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Channel ready for use. Channel announced.'])

    # But l2 still sends its own sigs on reconnect
    l2.daemon.wait_for_logs([r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
                             r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES'])

    # l1 only sent them the first time
    assert(''.join(l1.daemon.logs).count(r'peer_out WIRE_ANNOUNCEMENT_SIGNATURES') == 1)
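

# Note on the mock above: `rpcproxy.mock_rpc` intercepts l1's bitcoind RPC,
# and `no_blocks_above` mimics bitcoind's "Block height out of range" error
# (code -8) for any height above 107. Since announcement_signatures are only
# exchanged once the funding tx is deep enough (the "announce depth"),
# pretending the chain stops at block 107 keeps l1 from re-sending its
# signatures after the restart, which is exactly the situation this test
# wants to provoke.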


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_shutdown_awaiting_lockin(node_factory, bitcoind):
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(options={'funding-confirms': 3})

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundwallet(10**6 + 1000000)
    chanid = l1.rpc.fundchannel(l2.info['id'], 10**6)['channel_id']

    # Technically, this is async to fundchannel.
    bitcoind.generate_block(1, wait_for_mempool=1)

    l1.rpc.close(chanid)

    l1_state = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
    l2_state = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
    l1.daemon.wait_for_log('{}_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN'.format(l1_state))
    l2.daemon.wait_for_log('{}_AWAITING_LOCKIN to CHANNELD_SHUTTING_DOWN'.format(l2_state))

    l1.daemon.wait_for_log('CHANNELD_SHUTTING_DOWN to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log('CHANNELD_SHUTTING_DOWN to CLOSINGD_SIGEXCHANGE')

    # And should put closing into mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])

    bitcoind.generate_block(1, wait_for_mempool=1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    bitcoind.generate_block(100)
    # Won't disconnect!
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'] == [])
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'] == [])


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_funding_change(node_factory, bitcoind):
    """Add some funds, fund a channel, and make sure we remember the change
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=False)
    l1.fundwallet(10000000)
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == 10000000

    l1.rpc.fundchannel(l2.info['id'], 1000000)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])

    outputs = {r['status']: r['value'] for r in l1.db_query(
        'SELECT status, SUM(value) AS value FROM outputs GROUP BY status;')}

    # The 10m out is spent and we have a change output of 9m-fee
    assert outputs[0] > 8990000
    assert outputs[2] == 10000000
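

# For reference (an assumption based on lightningd's wallet schema rather than
# anything asserted by the test framework itself): in the `outputs` table,
# status 0 means the output is available to spend and status 2 means it has
# been spent, which is why the change lands under key 0 and the original
# 10m sat deposit under key 2 in the dict above.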