#!/usr/bin/env python3
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This script computes the number of concurrent jobs that can run in the
# build as a function of the machine. It accepts a set of key-value pairs
# given by repeated --memory-per-job arguments. For example:
#
#   $ get_concurrent_jobs.py --memory-per-job dart=1GB
#
# The result is a JSON map printed to stdout that gives the number of
# concurrent jobs allowed of each kind. For example:
#
#   {"dart": 8}
#
# Some memory can be held out of the calculation with the --reserve-memory flag.
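#
# For example, to also hold back 2GB (a hypothetical amount) for the rest of
# the system:
#
#   $ get_concurrent_jobs.py --memory-per-job dart=1GB --reserve-memory 2GB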
import argparse
import ctypes
import json
import multiprocessing
import os
import re
import subprocess
import sys

UNITS = {'B': 1, 'KB': 2**10, 'MB': 2**20, 'GB': 2**30, 'TB': 2**40}


# pylint: disable=line-too-long
# See https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-globalmemorystatusex
# and https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex
# pylint: enable=line-too-long
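# Note: dwLength must be set to the size of the structure before it is passed
# to GlobalMemoryStatusEx; get_total_memory below does this.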
class MEMORYSTATUSEX(ctypes.Structure):
  _fields_ = [
      ('dwLength', ctypes.c_ulong),
      ('dwMemoryLoad', ctypes.c_ulong),
      ('ullTotalPhys', ctypes.c_ulonglong),
      ('ullAvailPhys', ctypes.c_ulonglong),
      ('ullTotalPageFile', ctypes.c_ulonglong),
      ('ullAvailPageFile', ctypes.c_ulonglong),
      ('ullTotalVirtual', ctypes.c_ulonglong),
      ('ullAvailVirtual', ctypes.c_ulonglong),
      ('sullAvailExtendedVirtual', ctypes.c_ulonglong),
  ]


def get_total_memory():
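  """Returns the total physical memory of this machine in bytes.

  Returns 0 if the amount cannot be determined.
  """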
  if sys.platform in ('win32', 'cygwin'):
    stat = MEMORYSTATUSEX(dwLength=ctypes.sizeof(MEMORYSTATUSEX))
    success = ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
    return stat.ullTotalPhys if success else 0
  if sys.platform.startswith('linux'):
    if os.path.exists('/proc/meminfo'):
      with open('/proc/meminfo') as meminfo:
        memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
        for line in meminfo:
          match = memtotal_re.match(line)
          if match:
            return float(match.group(1)) * 2**10
  if sys.platform == 'darwin':
    try:
      return int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
    except:  # pylint: disable=bare-except
      return 0
  return 0


def parse_size(string):
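  """Converts a size string such as '1GB' or '512MB' into a number of bytes."""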
  i = next(i for (i, c) in enumerate(string) if not c.isdigit())
  number = string[:i].strip()
  unit = string[i:].strip()
  return int(float(number) * UNITS[unit])


class ParseSizeAction(argparse.Action):
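  """argparse action that accumulates repeated name=size pairs, e.g. dart=1GB."""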
  def __call__(self, parser, args, values, option_string=None):
    sizes = getattr(args, self.dest, [])
    for value in values:
      (k, val) = value.split('=', 1)
      sizes.append((k, parse_size(val)))
    setattr(args, self.dest, sizes)


def main():
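  """Parses the command line and prints the per-job concurrency map as JSON."""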
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--memory-per-job',
      action=ParseSizeAction,
      default=[],
      nargs='*',
      help='Key value pairings (dart=1GB) giving an estimate of the amount of '
      'memory needed for the class of job.'
  )
  parser.add_argument(
      '--reserve-memory',
      type=parse_size,
      default=0,
      help='The amount of memory to be held out of the amount for jobs to use.'
  )
  args = parser.parse_args()

  total_memory = get_total_memory()

  # Ensure the total memory used in the calculation below is at least 0.
  mem_total_bytes = max(0, total_memory - args.reserve_memory)

  # Ensure the number of cpus used in the calculation below is at least 1.
  try:
    cpu_cap = multiprocessing.cpu_count()
  except:  # pylint: disable=bare-except
    cpu_cap = 1

  concurrent_jobs = {}
  for job, memory_per_job in args.memory_per_job:
    # Calculate the number of jobs that will fit in memory. Ensure the
    # value is at least 1.
    num_concurrent_jobs = int(max(1, mem_total_bytes / memory_per_job))
    # Cap the number of jobs by the number of cpus available.
    concurrent_jobs[job] = min(num_concurrent_jobs, cpu_cap)

  print(json.dumps(concurrent_jobs))

  return 0


if __name__ == '__main__':
  sys.exit(main())