//===--- DriverUtils.swift ------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
import Darwin
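/// Aggregated timing statistics for one benchmark, rendered as a
/// delimiter-separated row via the CustomStringConvertible conformance below.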
struct BenchResults {
var delim: String = ","
var sampleCount: UInt64 = 0
var min: UInt64 = 0
var max: UInt64 = 0
var mean: UInt64 = 0
var sd: UInt64 = 0
var median: UInt64 = 0
init() {}
init(delim: String, sampleCount: UInt64, min: UInt64, max: UInt64, mean: UInt64, sd: UInt64, median: UInt64) {
self.delim = delim
self.sampleCount = sampleCount
self.min = min
self.max = max
self.mean = mean
self.sd = sd
self.median = median
// Sanity-check the bounds of our results.
precondition(self.min <= self.max, "min should always be <= max")
precondition(self.min <= self.mean, "min should always be <= mean")
precondition(self.min <= self.median, "min should always be <= median")
precondition(self.max >= self.mean, "max should always be >= mean")
precondition(self.max >= self.median, "max should always be >= median")
}
}
extension BenchResults : CustomStringConvertible {
var description: String {
return "\(sampleCount)\(delim)\(min)\(delim)\(max)\(delim)\(mean)\(delim)\(sd)\(delim)\(median)"
}
}
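/// A registered benchmark: a display name, a stable numeric index, the entry
/// point to invoke with the iteration count, and a flag recording whether the
/// current filters enable it.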
struct Test {
let name: String
let index: Int
let f: (Int) -> ()
var run: Bool
init(name: String, n: Int, f: @escaping (Int) -> ()) {
self.name = name
self.index = n
self.f = f
run = true
}
}
public var precommitTests: [String : (Int) -> ()] = [:]
public var otherTests: [String : (Int) -> ()] = [:]
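// Benchmarks register themselves by adding entries to the tables above.
// A minimal sketch, using a hypothetical benchmark (the real registrations
// live in the suite's generated main file, not here):
//
//   func run_MyBench(_ n: Int) {
//     for _ in 0..<n { /* workload under test */ }
//   }
//   precommitTests["MyBench"] = run_MyBench
/// The action selected by command-line argument parsing.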
enum TestAction {
case Run
case ListTests
case Fail(String)
}
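/// Harness configuration parsed from the command line.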
struct TestConfig {
/// The delimiter to use when printing output.
var delim: String = ","
/// The filters applied to our test names.
var filters = [String]()
/// A scalar multiplier on the number of iterations each test runs. This
/// lets a test run N times longer than it normally would, which is useful
/// when performing performance analysis of a test in Instruments.
var iterationScale: Int = 1
/// If we are asked to have a fixed number of iterations, the number of fixed
/// iterations.
var fixedNumIters: UInt = 0
/// The number of samples we should take of each test.
var numSamples: Int = 1
/// Is verbose output enabled?
var verbose: Bool = false
/// Should we only run the "pre-commit" tests?
var onlyPrecommit: Bool = true
/// The number of seconds, if any, to sleep after running the tests, so that
/// utilities like leaks that need a live PID can attach to the test harness.
var afterRunSleep: Int?
/// The list of tests to run.
var tests = [Test]()
mutating func processArguments() -> TestAction {
let validOptions = [
"--iter-scale", "--num-samples", "--num-iters",
"--verbose", "--delim", "--run-all", "--list", "--sleep"
]
guard let benchArgs = parseArgs(validOptions) else {
return .Fail("Failed to parse arguments")
}
if let _ = benchArgs.optionalArgsMap["--list"] {
return .ListTests
}
if let x = benchArgs.optionalArgsMap["--iter-scale"] {
if x.isEmpty { return .Fail("--iter-scale requires a value") }
iterationScale = Int(x)!
}
if let x = benchArgs.optionalArgsMap["--num-iters"] {
if x.isEmpty { return .Fail("--num-iters requires a value") }
fixedNumIters = numericCast(Int(x)!)
}
if let x = benchArgs.optionalArgsMap["--num-samples"] {
if x.isEmpty { return .Fail("--num-samples requires a value") }
numSamples = Int(x)!
}
if let _ = benchArgs.optionalArgsMap["--verbose"] {
verbose = true
print("Verbose")
}
if let x = benchArgs.optionalArgsMap["--delim"] {
if x.isEmpty { return .Fail("--delim requires a value") }
delim = x
}
if let _ = benchArgs.optionalArgsMap["--run-all"] {
onlyPrecommit = false
}
if let x = benchArgs.optionalArgsMap["--sleep"] {
if x.isEmpty {
return .Fail("--sleep requires a non-empty integer value")
}
let v: Int? = Int(x)
if v == nil {
return .Fail("--sleep requires a non-empty integer value")
}
afterRunSleep = v!
}
filters = benchArgs.positionalArgs
return .Run
}
mutating func findTestsToRun() {
var i = 1
for benchName in precommitTests.keys.sorted() {
tests.append(Test(name: benchName, n: i, f: precommitTests[benchName]!))
i += 1
}
for benchName in otherTests.keys.sorted() {
tests.append(Test(name: benchName, n: i, f: otherTests[benchName]!))
i += 1
}
for i in 0..<tests.count {
if onlyPrecommit && precommitTests[tests[i].name] == nil {
tests[i].run = false
}
if !filters.isEmpty &&
!filters.contains(String(tests[i].index)) &&
!filters.contains(tests[i].name) {
tests[i].run = false
}
}
}
}
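/// Returns the integer mean and the sample standard deviation (n-1
/// denominator) of the given samples.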
func internalMeanSD(_ inputs: [UInt64]) -> (UInt64, UInt64) {
// If we are empty, return 0, 0.
if inputs.isEmpty {
return (0, 0)
}
// If we have one element, return elt, 0.
if inputs.count == 1 {
return (inputs[0], 0)
}
// Otherwise we have at least two elements.
var sum1: UInt64 = 0
var sum2: UInt64 = 0
for i in inputs {
sum1 += i
}
let mean: UInt64 = sum1 / UInt64(inputs.count)
for i in inputs {
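// Accumulate squared deviations with wrapping operators to avoid
// overflow traps on large samples.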
sum2 = sum2 &+ UInt64((Int64(i) &- Int64(mean))&*(Int64(i) &- Int64(mean)))
}
return (mean, UInt64(sqrt(Double(sum2)/(Double(inputs.count) - 1))))
}
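/// Returns the median sample; with an even count this is the upper of the
/// two middle values rather than their average.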
func internalMedian(_ inputs: [UInt64]) -> UInt64 {
return inputs.sorted()[inputs.count / 2]
}
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
@_silgen_name("swift_leaks_startTrackingObjects")
func startTrackingObjects(_: UnsafeMutableRawPointer) -> ()
@_silgen_name("swift_leaks_stopTrackingObjects")
func stopTrackingObjects(_: UnsafeMutableRawPointer) -> Int
#endif
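/// Times one sample of a benchmark with mach_absolute_time, converting the
/// elapsed Mach ticks to nanoseconds via the cached timebase info.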
class SampleRunner {
var info = mach_timebase_info_data_t(numer: 0, denom: 0)
init() {
mach_timebase_info(&info)
}
func run(_ name: String, fn: (Int) -> Void, num_iters: UInt) -> UInt64 {
// Start the timer.
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
var str = name
startTrackingObjects(UnsafeMutableRawPointer(str._core.startASCII))
#endif
let start_ticks = mach_absolute_time()
fn(Int(num_iters))
// Stop the timer.
let end_ticks = mach_absolute_time()
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
stopTrackingObjects(UnsafeMutableRawPointer(str._core.startASCII))
#endif
// Compute the elapsed ticks and convert them to nanoseconds using the
// Mach timebase.
let elapsed_ticks = end_ticks - start_ticks
return elapsed_ticks * UInt64(info.numer) / UInt64(info.denom)
}
}
/// Invoke the benchmark entry point and return the aggregated per-iteration
/// sample times in microseconds.
func runBench(_ name: String, _ fn: (Int) -> Void, _ c: TestConfig) -> BenchResults {
var samples = [UInt64](repeating: 0, count: c.numSamples)
if c.verbose {
print("Running \(name) for \(c.numSamples) samples.")
}
let sampler = SampleRunner()
for s in 0..<c.numSamples {
let time_per_sample: UInt64 = 1_000_000_000 * UInt64(c.iterationScale)
var scale: UInt
var elapsed_time: UInt64 = 0
if c.fixedNumIters == 0 {
// No fixed iteration count was given: run once to estimate the
// per-iteration time, then scale so that each sample runs for roughly
// one second times the iteration scale.
elapsed_time = sampler.run(name, fn: fn, num_iters: 1)
scale = UInt(time_per_sample / max(elapsed_time, 1))
} else {
// A fixed iteration count was specified; use it directly.
scale = c.fixedNumIters
}
// Rerun the test with the computed scale factor.
if scale > 1 {
if c.verbose {
print(" Measuring with scale \(scale).")
}
elapsed_time = sampler.run(name, fn: fn, num_iters: scale)
} else {
scale = 1
}
// Save the per-iteration time in microseconds.
samples[s] = elapsed_time / UInt64(scale) / 1000
if c.verbose {
print(" Sample \(s),\(samples[s])")
}
}
let (mean, sd) = internalMeanSD(samples)
// Return our benchmark results.
return BenchResults(delim: c.delim, sampleCount: UInt64(samples.count),
min: samples.min()!, max: samples.max()!,
mean: mean, sd: sd, median: internalMedian(samples))
}
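/// In verbose mode, print the harness configuration and the list of tests
/// that will run.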
func printRunInfo(_ c: TestConfig) {
if c.verbose {
print("--- CONFIG ---")
print("NumSamples: \(c.numSamples)")
print("Verbose: \(c.verbose)")
print("IterScale: \(c.iterationScale)")
if c.fixedNumIters != 0 {
print("FixedIters: \(c.fixedNumIters)")
}
print("Tests Filter: \(c.filters)")
print("Tests to run: ", terminator: "")
for t in c.tests {
if t.run {
print("\(t.name), ", terminator: "")
}
}
print("")
print("")
print("--- DATA ---")
}
}
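/// Run every enabled test, printing a CSV header, one row of results per
/// test, and a totals row (min/max/mean are summed across tests).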
func runBenchmarks(_ c: TestConfig) {
let units = "us"
print("#\(c.delim)TEST\(c.delim)SAMPLES\(c.delim)MIN(\(units))\(c.delim)MAX(\(units))\(c.delim)MEAN(\(units))\(c.delim)SD(\(units))\(c.delim)MEDIAN(\(units))")
var sumBenchResults = BenchResults()
for t in c.tests {
if !t.run {
continue
}
let results = runBench(t.name, t.f, c)
print("\(t.index)\(c.delim)\(t.name)\(c.delim)\(results.description)")
fflush(stdout)
sumBenchResults.min += results.min
sumBenchResults.max += results.max
sumBenchResults.mean += results.mean
sumBenchResults.sampleCount += 1
// Don't accumulate SD and median: a simple sum is not meaningful for them.
// TODO: Compute SD and median for the totals as well.
}
print("")
print("Totals\(c.delim)\(SumBenchResults.description)")
}
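/// Harness entry point: parse arguments, select tests, then either list or
/// run them.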
public func main() {
var config = TestConfig()
switch (config.processArguments()) {
case let .Fail(msg):
// fatalError takes its message as an @autoclosure, so wrap msg in an
// explicit interpolation.
fatalError("\(msg)")
case .ListTests:
config.findTestsToRun()
print("Enabled Tests:")
for t in config.tests {
print(" \(t.name)")
}
case .Run:
config.findTestsToRun()
printRunInfo(config)
runBenchmarks(config)
if let x = config.afterRunSleep {
sleep(UInt32(x))
}
}
}
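// A minimal sketch of how the harness is driven (the binary and test names
// below are hypothetical; the flags match those parsed in processArguments):
//
//   $ MyBenchmarkBinary --list
//   $ MyBenchmarkBinary --num-samples 3 --verbose MyBench
//   $ MyBenchmarkBinary --run-all --num-iters 10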