from concurrent import futures
from utils import NodeFactory, BitcoinD

import logging
import os
import pytest
import re
import shutil
import sys
import tempfile


with open('config.vars') as configfile:
    config = dict([(line.rstrip().split('=', 1)) for line in configfile])

VALGRIND = os.getenv("VALGRIND", config['VALGRIND']) == "1"
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
TEST_DEBUG = os.getenv("TEST_DEBUG", "0") == "1"

if TEST_DEBUG:
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)


# A dict in which we count how often a particular test has run so far. Used to
# give each attempt its own numbered directory, and avoid clashes.
__attempts = {}


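# Session-wide base directory: every test run gets its own subdirectory below
# this, and it is only removed at the end of the session if nothing was left
# behind.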
@pytest.fixture(scope="session")
def test_base_dir():
    directory = tempfile.mkdtemp(prefix='ltests-')
    print("Running tests in {}".format(directory))

    yield directory

    if os.listdir(directory) == []:
        shutil.rmtree(directory)


@pytest.fixture
def directory(request, test_base_dir, test_name):
    """Return a per-test specific directory.

    This makes a unique test-directory even if a test is rerun multiple times.
    """
    global __attempts
    # Auto-set the value if it isn't in the dict yet
    __attempts[test_name] = __attempts.get(test_name, 0) + 1
    directory = os.path.join(test_base_dir, "{}_{}".format(test_name, __attempts[test_name]))
    request.node.has_errors = False

    yield directory

    # This uses the status set in conftest.pytest_runtest_makereport to
    # determine whether we succeeded or failed. Outcome can be None if the
    # failure occurs during the setup phase, hence the use of getattr instead
    # of accessing it directly.
    outcome = getattr(request.node, 'rep_call', None)
    failed = not outcome or request.node.has_errors or outcome != 'passed'

    if not failed:
        shutil.rmtree(directory)
    else:
        logging.debug("Test execution failed, leaving the test directory {} intact.".format(directory))


@pytest.fixture
def test_name(request):
    yield request.function.__name__


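# A bitcoind instance backing the lightning nodes under test.  Refuses to run
# against anything older than v0.16.0 and makes sure the wallet has spendable
# funds before yielding.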
@pytest.fixture
def bitcoind(directory):
    bitcoind = BitcoinD(bitcoin_dir=directory)
    try:
        bitcoind.start()
    except Exception:
        bitcoind.stop()
        raise

    info = bitcoind.rpc.getnetworkinfo()

    if info['version'] < 160000:
        bitcoind.rpc.stop()
        raise ValueError("bitcoind is too old. At least version 160000 (v0.16.0)"
                         " is needed, current version is {}".format(info['version']))

    info = bitcoind.rpc.getblockchaininfo()
    # Make sure we have some spendable funds
    if info['blocks'] < 101:
        bitcoind.generate_block(101 - info['blocks'])
    elif bitcoind.rpc.getwalletinfo()['balance'] < 1:
        logging.debug("Insufficient balance, generating 1 block")
        bitcoind.generate_block(1)

    yield bitcoind

    try:
        bitcoind.stop()
    except Exception:
        bitcoind.proc.kill()
    bitcoind.proc.wait()


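# Factory to spin up lightning nodes for a test.  On teardown it kills all
# nodes and scans their logs for valgrind errors, crash logs, **BROKEN**
# messages, unexpected reconnects, bad gossip, bad reestablish, bad hsm
# requests and memory leaks, failing the test if any are found.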
@pytest.fixture
def node_factory(request, directory, test_name, bitcoind, executor):
    nf = NodeFactory(test_name, bitcoind, executor, directory=directory)

    yield nf

    err_count = 0
    ok = nf.killall([not n.may_fail for n in nf.nodes])

    def check_errors(request, err_count, msg):
        """Simple helper to format a message, flag the test as failed and raise."""
        if err_count:
            request.node.has_errors = True
            raise ValueError(msg.format(err_count))

    if VALGRIND:
        for node in nf.nodes:
            err_count += printValgrindErrors(node)
        check_errors(request, err_count, "{} nodes reported valgrind errors")

    for node in nf.nodes:
        err_count += printCrashLog(node)
    check_errors(request, err_count, "{} nodes had crash.log files")

    for node in [n for n in nf.nodes if not n.allow_broken_log]:
        err_count += checkBroken(node)
    check_errors(request, err_count, "{} nodes had BROKEN messages")

    for node in nf.nodes:
        err_count += checkReconnect(node)
    check_errors(request, err_count, "{} nodes had unexpected reconnections")

    for node in nf.nodes:
        err_count += checkBadGossip(node)
    check_errors(request, err_count, "{} nodes had bad gossip messages")

    for node in nf.nodes:
        err_count += checkBadReestablish(node)
    check_errors(request, err_count, "{} nodes had bad reestablish")

    for node in nf.nodes:
        err_count += checkBadHSMRequest(node)
    if err_count:
        raise ValueError("{} nodes had bad hsm requests".format(err_count))

    for node in nf.nodes:
        err_count += checkMemleak(node)
    if err_count:
        raise ValueError("{} nodes had memleak messages".format(err_count))

    if not ok:
        request.node.has_errors = True
        raise Exception("At least one lightning node exited with an unexpected non-zero return code")


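# Return the contents and filename of the first non-empty valgrind error
# report in the node's lightning directory, or (None, None) if there is none.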
def getValgrindErrors(node):
    for error_file in os.listdir(node.daemon.lightning_dir):
        if not re.fullmatch(r"valgrind-errors.\d+", error_file):
            continue
        with open(os.path.join(node.daemon.lightning_dir, error_file), 'r') as f:
            errors = f.read().strip()
            if errors:
                return errors, error_file
    return None, None


def printValgrindErrors(node):
    errors, fname = getValgrindErrors(node)
    if errors:
        print("-" * 31, "Valgrind errors", "-" * 32)
        print("Valgrind error file:", fname)
        print(errors)
        print("-" * 80)
    return 1 if errors else 0


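# Return the lines and path of the node's crash.log, or (None, None) if the
# node was allowed to fail or no crash log exists.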
def getCrashLog(node):
    if node.may_fail:
        return None, None
    try:
        crashlog = os.path.join(node.daemon.lightning_dir, 'crash.log')
        with open(crashlog, 'r') as f:
            return f.readlines(), crashlog
    except Exception:
        return None, None


def printCrashLog(node):
    errors, fname = getCrashLog(node)
    if errors:
        print("-" * 10, "{} (last 50 lines)".format(fname), "-" * 10)
        print("".join(errors[-50:]))
        print("-" * 80)
    return 1 if errors else 0


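# The check* helpers below scan a node's log for a specific problem and return
# 1 if it is present and 0 otherwise, so node_factory can simply sum them up.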
def checkReconnect(node):
    # Without DEVELOPER, we can't suppress reconnection.
    if node.may_reconnect or not DEVELOPER:
        return 0
    if node.daemon.is_in_log('Peer has reconnected'):
        return 1
    return 0


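# Flag 'Bad gossip' complaints, except for the known-benign case of a
# node_announcement arriving after its channel was already deleted.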
def checkBadGossip(node):
    # We can get bad gossip order from inside error msgs.
    if node.daemon.is_in_log('Bad gossip order from (?!error)'):
        # This can happen if a node sees a node_announce after a channel
        # is deleted, however.
        if node.daemon.is_in_log('Deleting channel'):
            return 0
        return 1

    # Other 'Bad' messages shouldn't happen.
    if node.daemon.is_in_log(r'gossipd.*Bad (?!gossip order from error)'):
        return 1
    return 0


def checkBroken(node):
    # A **BROKEN** message in the log indicates an internal error that should
    # never happen.
    if node.daemon.is_in_log(r'\*\*BROKEN\*\*'):
        return 1
    return 0


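# Flag 'Bad reestablish' complaints in the node's log.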
def checkBadReestablish(node):
    if node.daemon.is_in_log('Bad reestablish'):
        return 1
    return 0


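# Flag 'bad hsm request' complaints in the node's log.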
def checkBadHSMRequest(node):
    if node.daemon.is_in_log('bad hsm request'):
        return 1
    return 0


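# Flag memory-leak reports ('MEMLEAK:') in the node's log.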
def checkMemleak(node):
    if node.daemon.is_in_log('MEMLEAK:'):
        return 1
    return 0


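# Shared thread pool for tests and the node factory to run blocking calls
# concurrently.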
@pytest.fixture
def executor():
    ex = futures.ThreadPoolExecutor(max_workers=20)

    yield ex

    ex.shutdown(wait=False)


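# Example usage (a sketch, not part of the fixtures themselves): a test module
# that imports these fixtures can simply request them by name.  This assumes
# NodeFactory provides a get_node() helper as in the accompanying utils
# module; adapt the helper names if they differ.
#
#     def test_node_starts(node_factory, bitcoind):
#         l1 = node_factory.get_node()
#         bitcoind.generate_block(1)
#         assert l1.rpc.getinfo()['id']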