
Commit 0796eaa

[Python] Fix 80-column violations
Parent: 479d792

32 files changed: +512 additions, -245 deletions

.pep8

+2-1
@@ -1,3 +1,4 @@
 [flake8]
 filename = *.py,Benchmark_Driver,Benchmark_DTrace.in,Benchmark_GuardMalloc.in,Benchmark_RuntimeLeaksRunner.in,build-script,gyb,line-directive,ns-html2rst,recursive-lipo,rth,submit-benchmark-results,update-checkout,viewcfg
-ignore = D100,D101,D102,D103,D104,D105,E402,E501
+ignore = D100,D101,D102,D103,D104,D105,E402
+max-line-length = 80
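Note: with E501 no longer ignored and max-line-length set to 80, flake8 now reports any line longer than 80 columns, which is what the per-file changes below fix. A minimal sketch of the two wrapping styles the commit relies on (hypothetical names, not code from the tree): implicit continuation inside an open bracket, and an explicit backslash where there is no bracket to break inside.

def configure_driver(binary_dir, xfail_list, enable_parallel=False,
                     opt_levels=('O', 'Onone')):
    # Placeholder body; the real drivers live under benchmark/scripts/.
    return (binary_dir, xfail_list, enable_parallel, list(opt_levels))

# Style 1: break inside the parentheses (implicit line continuation).
targets = configure_driver(
    '/tmp/swift-bin', [], enable_parallel=False,
    opt_levels=('O', 'Onone'))

# Style 2: explicit backslash continuation, used below for long
# expressions that have no bracket to break inside.
is_default = targets[2] is False and \
    targets[3] == ['O', 'Onone']

print(targets, is_default)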

benchmark/scripts/Benchmark_DTrace.in

+26-10
@@ -31,17 +31,21 @@ XFAIL_LIST = [
 class DTraceResult(perf_test_driver.Result):
 
     def __init__(self, name, status, output, csv_output):
-        perf_test_driver.Result.__init__(self, name, status, output, XFAIL_LIST)
+        perf_test_driver.Result.__init__(
+            self, name, status, output, XFAIL_LIST)
         self.csv_output = csv_output
 
     @classmethod
     def data_headers(cls):
-        return ['Name', 'Result', 'strong_retain', 'strong_retain/iter', 'strong_release', 'strong_release/iter']
+        return [
+            'Name', 'Result', 'strong_retain', 'strong_retain/iter',
+            'strong_release', 'strong_release/iter']
 
     @classmethod
     def data_format(cls, max_test_len):
         non_name_headers = DTraceResult.data_headers()[1:]
-        fmt = ('{:<%d}' % (max_test_len + 5)) + ''.join(['{:<%d}' % (len(h) + 2) for h in non_name_headers])
+        fmt = ('{:<%d}' % (max_test_len + 5)) + \
+            ''.join(['{:<%d}' % (len(h) + 2) for h in non_name_headers])
         return fmt
 
     @classmethod
@@ -82,10 +86,15 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
         sys.stdout.flush()
 
         def get_results_with_iters(iters):
-            p = subprocess.Popen(['sudo', 'dtrace', '-s', DTRACE_PATH, '-c', '%s %s %s' % (data['path'], data['test_name'], '--num-iters=%d' % iters)],
-                                 stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'))
+            p = subprocess.Popen([
+                'sudo', 'dtrace', '-s', DTRACE_PATH,
+                '-c', '%s %s %s' % (data['path'], data['test_name'],
+                                    '--num-iters=%d' % iters)
+            ], stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'))
             results = [x for x in p.communicate()[0].split("\n") if len(x) > 0]
-            return [x.split(',')[1] for x in results[results.index('DTRACE RESULTS') + 1:]]
+            return [
+                x.split(',')[1] for x in
+                results[results.index('DTRACE RESULTS') + 1:]]
         iter_2_results = get_results_with_iters(2)
         iter_3_results = get_results_with_iters(3)
 
@@ -101,10 +110,17 @@ SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))
 
 def parse_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-filter', type=str, default=None,
-                        help='Filter out any test that does not match the given regex')
-    parser.add_argument('-csv', default=False, action='store_true',
-                        help="Emit csv output", dest='csv_output')
+    parser.add_argument(
+        '-filter',
+        type=str,
+        default=None,
+        help='Filter out any test that does not match the given regex')
+    parser.add_argument(
+        '-csv',
+        default=False,
+        action='store_true',
+        help="Emit csv output",
+        dest='csv_output')
     return parser.parse_args()
 
 if __name__ == "__main__":

benchmark/scripts/Benchmark_RuntimeLeaksRunner.in

+23-14
@@ -26,7 +26,8 @@ import perf_test_driver
 XFAIL_LIST = [
 ]
 
-# Global objective-c classes created by various frameworks. We do not care about these.
+# Global objective-c classes created by various frameworks. We do not care about
+# these.
 IGNORABLE_GLOBAL_OBJC_CLASSES = set([
     '__NSPlaceholderDate',
     'NSCache',
@@ -60,6 +61,7 @@ class LeaksRunnerResult(perf_test_driver.Result):
         print(fmt.format(self.get_name(), self.get_result(),
                          self.get_count()))
 
+
 class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
 
     def __init__(self, binary, xfail_list, num_samples, num_iters):
@@ -81,54 +83,61 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
 
     def run_test(self, data, num_iters):
         try:
-            p = subprocess.Popen([data['path'], "--run-all", "--num-samples={}".format(data['num_samples']),
-                                  "--num-iters={}".format(num_iters), data['test_name']],
-                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            p = subprocess.Popen([
+                data['path'], "--run-all",
+                "--num-samples={}".format(data['num_samples']),
+                "--num-iters={}".format(num_iters), data['test_name']],
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
             error_out = p.communicate()[1].split("\n")
         except OSError:
-            print("Child Process Failed! (%s,%s)" % (data['path'], data['test_name']))
+            print("Child Process Failed! (%s,%s)" % (
+                data['path'], data['test_name']))
             return None
 
         try:
-            # We grab the second line since swift globals get lazily created in the
-            # first iteration.
+            # We grab the second line since swift globals get lazily created in
+            # the first iteration.
             d = json.loads(error_out[1])
-            d['objc_objects'] = [x for x in d['objc_objects'] if x not in IGNORABLE_GLOBAL_OBJC_CLASSES]
+            d['objc_objects'] = [x for x in d['objc_objects']
+                                 if x not in IGNORABLE_GLOBAL_OBJC_CLASSES]
             d['objc_count'] = len(d['objc_objects'])
 
             total_count = d['objc_count'] + d['swift_count']
             return total_count
         except (KeyError, ValueError):
-            print("Failed parse output! (%s,%s)" % (data['path'], data['test_name']))
+            print("Failed parse output! (%s,%s)" %
+                  (data['path'], data['test_name']))
            return None
 
-
     def process_input(self, data):
         test_name = '({},{})'.format(data['opt'], data['test_name'])
         print("Running {}...".format(test_name))
         sys.stdout.flush()
         total_count1 = self.run_test(data, data['num_iters'])
         if total_count1 is None:
             return LeaksRunnerResult(test_name)
-        total_count2 = self.run_test(data, data['num_iters']+1)
+        total_count2 = self.run_test(data, data['num_iters'] + 1)
         if total_count2 is None:
             return LeaksRunnerResult(test_name)
         return LeaksRunnerResult(test_name, total_count2 - total_count1)
 
 SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))
 
+
 def parse_args():
     import argparse
     parser = argparse.ArgumentParser()
-    parser.add_argument('-filter', type=str, default=None,
-                        help='Filter out any test that does not match the given regex')
+    parser.add_argument(
+        '-filter', type=str, default=None,
+        help='Filter out any test that does not match the given regex')
     parser.add_argument('-num-samples', type=int, default=2)
     parser.add_argument('-num-iters', type=int, default=2)
     return parser.parse_args()
 
 if __name__ == "__main__":
     args = parse_args()
-    l = LeaksRunnerBenchmarkDriver(SWIFT_BIN_DIR, XFAIL_LIST, args.num_samples, args.num_iters)
+    l = LeaksRunnerBenchmarkDriver(
+        SWIFT_BIN_DIR, XFAIL_LIST, args.num_samples, args.num_iters)
     if l.run(args.filter):
         sys.exit(0)
     else:

benchmark/scripts/compare_perf_tests.py

+14-7
@@ -16,7 +16,8 @@
 # e.g.
 # repeat.sh 3 tot/bin/Benchmark_Driver run -o -O > tot.O.times
 # repeat.sh 3 mypatch/bin/Benchmark_Driver run -o -O > mypatch.O.times
-# compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | column -s, -t
+# compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | \
+#     column -s, -t
 
 from __future__ import print_function
 import re
@@ -26,8 +27,10 @@
 VERBOSE = 0
 
 # #,TEST,SAMPLES,MIN(ms),MAX(ms),MEAN(ms),SD(ms),MEDIAN(ms)
-SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
-TOTALRE = re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+SCORERE = re.compile(
+    r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+TOTALRE = re.compile(
+    r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
 NUMGROUP = 1
 KEYGROUP = 2
 BESTGROUP = 4
@@ -70,7 +73,8 @@ def get_scores(fname):
                 scores[m.group(KEYGROUP)] = []
                 worstscores[m.group(KEYGROUP)] = []
             scores[m.group(KEYGROUP)].append(parse_int(m.group(BESTGROUP)))
-            worstscores[m.group(KEYGROUP)].append(parse_int(m.group(WORSTGROUP)))
+            worstscores[m.group(KEYGROUP)].append(
+                parse_int(m.group(WORSTGROUP)))
             if is_total:
                 nums[m.group(KEYGROUP)] = ""
             else:
@@ -83,7 +87,8 @@ def get_scores(fname):
 
 
 def is_max_score(newscore, maxscore, invert):
-    return not maxscore or (newscore > maxscore if not invert else newscore < maxscore)
+    return not maxscore or \
+        (newscore > maxscore if not invert else newscore < maxscore)
 
 
 def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
@@ -129,7 +134,8 @@ def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
     print (("%+d" % (bestscore2 - bestscore1)).rjust(9), end="")
 
     if bestscore1 != 0 and bestscore2 != 0:
-        print (("%+.1f%%" % (((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9), end="")
+        print (("%+.1f%%" %
+                (((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9), end="")
         if ShowSpeedup:
             Num, Den = float(bestscore2), float(bestscore1)
             if IsTime:
@@ -221,4 +227,5 @@ def usage():
         if key not in scores2:
             print(key, "not in", file2)
             continue
-        compare_scores(key, scores1[key], worstscores1[key], scores2[key], worstscores2[key], runs, nums[key])
+        compare_scores(key, scores1[key], worstscores1[key], scores2[key],
+                       worstscores2[key], runs, nums[key])
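Note: SCORERE and TOTALRE drive the whole parse, so it is worth checking that the wrapped pattern still matches the #,TEST,SAMPLES,MIN(ms),MAX(ms),... lines the comment above describes. A small sketch with a made-up output row (the values are hypothetical):

import re

SCORERE = re.compile(
    r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+),[ \t]*([\d.]+)")

line = "34,ArrayAppend,20,103,109,105,2,104"  # hypothetical benchmark row
m = SCORERE.match(line)
# Group 2 is the test name; group 4 is the MIN(ms) column that BESTGROUP
# indexes in the script.
print(m.group(2), m.group(4))  # -> ArrayAppend 103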

benchmark/scripts/generate_harness/generate_harness.py

+1
@@ -42,6 +42,7 @@
 
 # CMakeList multi-source
 class MultiSourceBench(object):
+
     def __init__(self, path):
         self.name = os.path.basename(path)
         self.files = [x for x in os.listdir(path)

benchmark/scripts/perf_test_driver/perf_test_driver.py

+18-7
@@ -24,7 +24,8 @@ def __init__(self, name, status, output, xfail_list):
         self.name = name
         self.status = status
         self.output = output
-        self.is_xfailed = any((re.match(x, self.name) is not None for x in xfail_list))
+        self.is_xfailed = any(
+            (re.match(x, self.name) is not None for x in xfail_list))
 
     def is_failure(self):
         return self.get_result() in ['FAIL', 'XPASS']
@@ -51,7 +52,7 @@ def get_data(self):
         return self.data
 
     def merge_in_extra_data(self, d):
-        """Rather than modifying the extra data dict, just return it as a no-op"""
+        """Rather than modifying the extra data dict, return it as a no-op"""
         return d
 
     def print_data(self, max_test_len):
@@ -67,8 +68,10 @@ def _unwrap_self(args):
 
 class BenchmarkDriver(object):
 
-    def __init__(self, binary_dir, xfail_list, enable_parallel=False, opt_levels=BenchmarkDriver_OptLevels):
-        self.targets = [(os.path.join(binary_dir, 'Benchmark_%s' % o), o) for o in opt_levels]
+    def __init__(self, binary_dir, xfail_list, enable_parallel=False,
+                 opt_levels=BenchmarkDriver_OptLevels):
+        self.targets = [(os.path.join(binary_dir, 'Benchmark_%s' % o), o)
+                        for o in opt_levels]
         self.xfail_list = xfail_list
         self.enable_parallel = enable_parallel
         self.data = None
@@ -85,7 +88,8 @@ def process_input(self, data):
 
     def run_for_opt_level(self, binary, opt_level, test_filter):
         print("testing driver at path: %s" % binary)
-        names = [n.strip() for n in subprocess.check_output([binary, "--list"]).split()[2:]]
+        names = [n.strip() for n in subprocess.check_output(
+            [binary, "--list"]).split()[2:]]
         if test_filter:
             regex = re.compile(test_filter)
             names = [n for n in names if regex.match(n)]
@@ -111,7 +115,12 @@ def reduce_results(acc, r):
             acc['extra_data'] = r.merge_in_extra_data(acc['extra_data'])
             return acc
 
-        return reduce(reduce_results, results, {'result': [], 'has_failure': False, 'max_test_len': 0, 'extra_data': {}})
+        return reduce(reduce_results, results, {
+            'result': [],
+            'has_failure': False,
+            'max_test_len': 0,
+            'extra_data': {}
+        })
 
     def print_data(self, data, max_test_len):
         print("Results:")
@@ -121,7 +130,9 @@ def print_data(self, data, max_test_len):
             r.print_data(max_test_len)
 
     def run(self, test_filter=None):
-        self.data = [self.run_for_opt_level(binary, opt_level, test_filter) for binary, opt_level in self.targets]
+        self.data = [
+            self.run_for_opt_level(binary, opt_level, test_filter)
+            for binary, opt_level in self.targets]
         max_test_len = reduce(max, [d['max_test_len']for d in self.data])
         has_failure = reduce(max, [d['has_failure']for d in self.data])
         self.print_data(self.data, max_test_len)

docs/conf.py

+6-6
@@ -18,7 +18,7 @@
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 # sys.path.insert(0, os.path.abspath('.'))
 
-# -- General configuration -----------------------------------------------------
+# -- General configuration -----------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
 # needs_sphinx = '1.0'
@@ -89,7 +89,7 @@
 # modindex_common_prefix = []
 
 
-# -- Options for HTML output ---------------------------------------------------
+# -- Options for HTML output ---------------------------------------------
 
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
@@ -176,7 +176,7 @@
 htmlhelp_basename = 'Swiftdoc'
 
 
-# -- Options for LaTeX output --------------------------------------------------
+# -- Options for LaTeX output --------------------------------------------
 
 latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
@@ -217,7 +217,7 @@
 # latex_domain_indices = True
 
 
-# -- Options for manual page output --------------------------------------------
+# -- Options for manual page output --------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
@@ -230,7 +230,7 @@
 # man_show_urls = False
 
 
-# -- Options for Texinfo output ------------------------------------------------
+# -- Options for Texinfo output ------------------------------------------
 
 # Grouping the document tree into Texinfo files. List of tuples
 # (source start file, target name, title, author,
@@ -255,7 +255,7 @@
 intersphinx_mapping = {}
 
 
-# -- Options for extensions ----------------------------------------------------
+# -- Options for extensions ----------------------------------------------
 
 # Enable this if you want TODOs to show up in the generated documentation.
 todo_include_todos = True

docs/scripts/ns-html2rst

+2-1
@@ -30,7 +30,8 @@ usage: nshtml2rst < NSString.html > NSString.rst
         r'<pre>\1</pre>',
         html, flags=re.MULTILINE | re.DOTALL)
 
-    # Remove links from <code>...</code>, which doesn't have a rendering in ReST
+    # Remove links from <code>...</code>, which doesn't have a rendering in
+    # ReST
     html = re.sub(
         r'<code>(.*?)<a[^>]*?>(.*?)</a>(.*?)</code>',
        r'<code>\1\2\3</code>',

test/Driver/Inputs/filelists/check-filelist-abc.py

+9-6
@@ -30,9 +30,12 @@
 
 with open(filelistFile, 'r') as f:
     lines = f.readlines()
-    assert lines[0].endswith("/a.swift\n") or lines[0].endswith("/a.swiftmodule\n")
-    assert lines[1].endswith("/b.swift\n") or lines[1].endswith("/b.swiftmodule\n")
-    assert lines[2].endswith("/c.swift\n") or lines[2].endswith("/c.swiftmodule\n")
+    assert(lines[0].endswith("/a.swift\n") or
+           lines[0].endswith("/a.swiftmodule\n"))
+    assert(lines[1].endswith("/b.swift\n") or
+           lines[1].endswith("/b.swiftmodule\n"))
+    assert(lines[2].endswith("/c.swift\n") or
+           lines[2].endswith("/c.swiftmodule\n"))
 
 if primaryFile:
     print("Handled", os.path.basename(primaryFile))
@@ -45,7 +48,7 @@
     outputListFile = sys.argv[sys.argv.index('-output-filelist') + 1]
     with open(outputListFile, 'r') as f:
         lines = f.readlines()
-        assert lines[0].endswith("/a.o\n") or lines[0].endswith("/a.bc\n")
-        assert lines[1].endswith("/b.o\n") or lines[1].endswith("/b.bc\n")
-        assert lines[2].endswith("/c.o\n") or lines[2].endswith("/c.bc\n")
+        assert(lines[0].endswith("/a.o\n") or lines[0].endswith("/a.bc\n"))
+        assert(lines[1].endswith("/b.o\n") or lines[1].endswith("/b.bc\n"))
+        assert(lines[2].endswith("/c.o\n") or lines[2].endswith("/c.bc\n"))
     print("...with output!")
