yapf tools
ncteisen committed Dec 12, 2017
1 parent 888093c commit 5f8bf79
Showing 10 changed files with 548 additions and 469 deletions.
22 changes: 12 additions & 10 deletions tools/debug/core/chttp2_ref_leak.py
@@ -20,23 +20,25 @@
import sys
import re


def new_obj():
-  return ['destroy']
+    return ['destroy']


outstanding = collections.defaultdict(new_obj)

# Sample log line:
# chttp2:unref:0x629000005200 2->1 destroy [src/core/ext/transport/chttp2/transport/chttp2_transport.c:599]

for line in sys.stdin:
-  m = re.search(r'chttp2:( ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
-  if m:
-    if m.group(1) == ' ref':
-      outstanding[m.group(2)].append(m.group(3))
-    else:
-      outstanding[m.group(2)].remove(m.group(3))
+    m = re.search(
+        r'chttp2:( ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
+    if m:
+        if m.group(1) == ' ref':
+            outstanding[m.group(2)].append(m.group(3))
+        else:
+            outstanding[m.group(2)].remove(m.group(3))

for obj, remaining in outstanding.items():
-  if remaining:
-    print 'LEAKED: %s %r' % (obj, remaining)
+    if remaining:
+        print 'LEAKED: %s %r' % (obj, remaining)
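For reference, a minimal standalone sketch of what the regex above extracts from the sample log line quoted in the script (the group values in the comments are worked out by hand, not captured from a run of the tool):

import re

line = ('chttp2:unref:0x629000005200 2->1 destroy '
        '[src/core/ext/transport/chttp2/transport/chttp2_transport.c:599]')
m = re.search(
    r'chttp2:( ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
# group(1) is ' ref' or 'unref', group(2) the object address ('629000005200'),
# group(3) the reason ('destroy '), group(4) the file:line inside brackets.
# The script appends the reason on ref lines and removes it on unref lines,
# so any address whose list is non-empty at EOF has a leaked reference.
print(m.groups())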
34 changes: 17 additions & 17 deletions tools/debug/core/error_ref_leak.py
@@ -26,22 +26,22 @@

errs = []
for line in data:
-  # if we care about the line
-  if re.search(r'error.cc', line):
-    # str manip to cut off left part of log line
-    line = line.partition('error.cc:')[-1]
-    line = re.sub(r'\d+] ', r'', line)
-    line = line.strip().split()
-    err = line[0].strip(":")
-    if line[1] == "create":
-      assert(err not in errs)
-      errs.append(err)
-    elif line[0] == "realloc":
-      errs.remove(line[1])
-      errs.append(line[3])
-    # explicitly look for the last dereference
-    elif line[1] == "1" and line[3] == "0":
-      assert(err in errs)
-      errs.remove(err)
+    # if we care about the line
+    if re.search(r'error.cc', line):
+        # str manip to cut off left part of log line
+        line = line.partition('error.cc:')[-1]
+        line = re.sub(r'\d+] ', r'', line)
+        line = line.strip().split()
+        err = line[0].strip(":")
+        if line[1] == "create":
+            assert (err not in errs)
+            errs.append(err)
+        elif line[0] == "realloc":
+            errs.remove(line[1])
+            errs.append(line[3])
+        # explicitly look for the last dereference
+        elif line[1] == "1" and line[3] == "0":
+            assert (err in errs)
+            errs.remove(err)

print "leaked:", errs
7 changes: 1 addition & 6 deletions tools/distrib/yapf_code.sh
@@ -20,12 +20,7 @@ cd "$(dirname "${0}")/../.."

DIRS=(
    'src/python'
-    'tools/buildgen'
-    'tools/codegen'
-    'tools/distrib'
-    'tools/interop_matrix'
-    'tools/profiling'
-    'tools/run_tests'
+    'tools'
)
EXCLUSIONS=(
    'grpcio/grpc_*.py'
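The script change simply points yapf at the whole tools/ tree instead of six hand-picked subdirectories. For illustration, a minimal sketch of the reformatting yapf applies to the 2-space-indented code in the diffs above (assumes yapf is installed, e.g. pip install yapf; the inline style options here are illustrative, not the repo's actual configuration):

from yapf.yapflib.yapf_api import FormatCode

source = "def new_obj():\n  return ['destroy']\n"
# FormatCode returns (formatted_source, changed); with a 4-space indent
# width it yields the post-commit form of the function shown above.
formatted, changed = FormatCode(
    source, style_config='{based_on_style: pep8, indent_width: 4}')
print(formatted)  # def new_obj():\n    return ['destroy']\n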
88 changes: 47 additions & 41 deletions tools/flakes/detect_flakes.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Detect new flakes introduced in the last 24h hours with respect to the
previous six days"""

@@ -32,26 +31,29 @@

import big_query_utils


def print_table(table):
    kokoro_base_url = 'https://kokoro.corp.google.com/job/'
    for k, v in table.items():
-    job_name = v[0]
-    build_id = v[1]
-    ts = int(float(v[2]))
-    # TODO(dgq): timezone handling is wrong. We need to determine the timezone
-    # of the computer running this script.
-    human_ts = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S PDT')
-    job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
-    full_kokoro_url = kokoro_base_url + job_path
-    print("Test: {}, Timestamp: {}, url: {}\n".format(k, human_ts, full_kokoro_url))
+        job_name = v[0]
+        build_id = v[1]
+        ts = int(float(v[2]))
+        # TODO(dgq): timezone handling is wrong. We need to determine the timezone
+        # of the computer running this script.
+        human_ts = datetime.datetime.utcfromtimestamp(ts).strftime(
+            '%Y-%m-%d %H:%M:%S PDT')
+        job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
+        full_kokoro_url = kokoro_base_url + job_path
+        print("Test: {}, Timestamp: {}, url: {}\n".format(k, human_ts,
                                                          full_kokoro_url))


def get_flaky_tests(days_lower_bound, days_upper_bound, limit=None):
""" period is one of "WEEK", "DAY", etc.
""" period is one of "WEEK", "DAY", etc.
(see https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#date_add). """

bq = big_query_utils.create_big_query()
query = """
bq = big_query_utils.create_big_query()
query = """
SELECT
REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
job_name,
@@ -65,41 +67,45 @@ def get_flaky_tests(days_lower_bound, days_upper_bound, limit=None):
AND NOT REGEXP_MATCH(job_name, '.*portability.*')
AND result != 'PASSED' AND result != 'SKIPPED'
ORDER BY timestamp desc
""".format(days_lower_bound=days_lower_bound, days_upper_bound=days_upper_bound)
if limit:
query += '\n LIMIT {}'.format(limit)
query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
page = bq.jobs().getQueryResults(
pageToken=None, **query_job['jobReference']).execute(num_retries=3)
rows = page.get('rows')
if rows:
return {row['f'][0]['v']:
""".format(
days_lower_bound=days_lower_bound, days_upper_bound=days_upper_bound)
if limit:
query += '\n LIMIT {}'.format(limit)
query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
page = bq.jobs().getQueryResults(
pageToken=None, **query_job['jobReference']).execute(num_retries=3)
rows = page.get('rows')
if rows:
return {
row['f'][0]['v']:
(row['f'][1]['v'], row['f'][2]['v'], row['f'][3]['v'])
for row in rows}
else:
return {}
for row in rows
}
else:
return {}


def get_new_flakes():
-  last_week_sans_yesterday = get_flaky_tests(-14, -1)
-  last_24 = get_flaky_tests(0, +1)
-  last_week_sans_yesterday_names = set(last_week_sans_yesterday.keys())
-  last_24_names = set(last_24.keys())
-  logging.debug('|last_week_sans_yesterday| =', len(last_week_sans_yesterday_names))
-  logging.debug('|last_24_names| =', len(last_24_names))
-  new_flakes = last_24_names - last_week_sans_yesterday_names
-  logging.debug('|new_flakes| = ', len(new_flakes))
-  return {k: last_24[k] for k in new_flakes}
+    last_week_sans_yesterday = get_flaky_tests(-14, -1)
+    last_24 = get_flaky_tests(0, +1)
+    last_week_sans_yesterday_names = set(last_week_sans_yesterday.keys())
+    last_24_names = set(last_24.keys())
+    logging.debug('|last_week_sans_yesterday| =',
                  len(last_week_sans_yesterday_names))
+    logging.debug('|last_24_names| =', len(last_24_names))
+    new_flakes = last_24_names - last_week_sans_yesterday_names
+    logging.debug('|new_flakes| = ', len(new_flakes))
+    return {k: last_24[k] for k in new_flakes}


def main():
-  new_flakes = get_new_flakes()
-  if new_flakes:
-    print("Found {} new flakes:".format(len(new_flakes)))
-    print_table(new_flakes)
-  else:
-    print("No new flakes found!")
+    new_flakes = get_new_flakes()
+    if new_flakes:
+        print("Found {} new flakes:".format(len(new_flakes)))
+        print_table(new_flakes)
+    else:
+        print("No new flakes found!")


if __name__ == '__main__':
-  main()
+    main()
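The detection logic boils down to a set difference over test names pulled out of BigQuery rows; a toy illustration with made-up values (BigQuery's REST API returns each row as {'f': [{'v': ...}, ...]}, one entry per selected column):

row = {'f': [{'v': 'end2end_test_foo'},   # filtered_test_name
             {'v': 'grpc/linux/master'},  # job_name
             {'v': '1234'},               # build_id
             {'v': '1513065600.0'}]}      # timestamp
name = row['f'][0]['v']
meta = (row['f'][1]['v'], row['f'][2]['v'], row['f'][3]['v'])

last_week = {'test_a': ('job1', '11', 't0'), name: meta}
last_24h = {name: meta, 'test_c': ('job3', '41', 't1')}
# A test is a "new flake" if it failed in the last 24h window
# but not in the preceding week:
new_names = set(last_24h) - set(last_week)        # {'test_c'}
new_flakes = {k: last_24h[k] for k in new_names}  # keeps the 24h metadata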
