logs.py (forked from google/fuzzbench)
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set up for logging."""
from enum import Enum
import logging
import os
import sys
import traceback
import google.cloud.logging
from google.cloud.logging.handlers.handlers import CloudLoggingHandler
from google.cloud import error_reporting
# Disable this check since we have a bunch of non-constant globals in this file.
# pylint: disable=invalid-name
from common import retry
from common import utils
_default_logger = None
_log_client = None
_error_reporting_client = None
_default_extras = {}
LOG_LENGTH_LIMIT = 250 * 1000
NUM_RETRIES = 5
RETRY_DELAY = 1


def _initialize_cloud_clients():
    """Initialize clients for Google Cloud Logging and Error reporting."""
    assert not utils.is_local()

    global _log_client
    if _log_client:
        return

    _log_client = google.cloud.logging.Client()
    logging_handler = CloudLoggingHandler(_log_client)
    logging.getLogger().addHandler(logging_handler)

    global _error_reporting_client
    _error_reporting_client = error_reporting.Client()


def initialize(name='fuzzbench', default_extras=None, log_level=logging.INFO):
    """Initializes stackdriver logging if running on Google Cloud."""
    logging.getLogger().setLevel(log_level)
    logging.getLogger().addFilter(LengthFilter())

    # Don't log so much with SQLalchemy to avoid stressing the logging library.
    # See crbug.com/1044343.
    logging.getLogger('sqlalchemy').setLevel(logging.ERROR)

    if utils.is_local():
        return

    _initialize_cloud_clients()

    global _default_logger
    _default_logger = _log_client.logger(name)

    default_extras = {} if default_extras is None else default_extras
    _set_instance_name(default_extras)
    _set_experiment(default_extras)

    global _default_extras
    _default_extras.update(default_extras)
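

# A minimal usage sketch (not part of the original module): in local runs the
# Cloud clients are skipped, so calling initialize() once and then using the
# module-level helpers is all a caller needs. The component name and extra
# values below are hypothetical.
#
#   from common import logs
#   logs.initialize(default_extras={'component': 'runner'})
#   logs.info('Starting trial %d.', 42)
#   logs.warning('Low disk space.', extras={'instance_name': 'runner-1'})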


def _set_instance_name(extras: dict):
    """Set instance_name in |extras| if it is provided by the environment and
    not already set."""
    if 'instance_name' in extras:
        return

    instance_name = os.getenv('INSTANCE_NAME')
    if instance_name is None:
        return

    extras['instance_name'] = instance_name


def _set_experiment(extras: dict):
    """Set experiment in |extras| if it is provided by the environment and
    not already set."""
    if 'experiment' in extras:
        return

    experiment = os.getenv('EXPERIMENT')
    if experiment is None:
        return

    extras['experiment'] = experiment


class Logger:
    """Wrapper around logging.Logger that allows it to be used like we use the
    root logger for stackdriver."""

    def __init__(self, name, default_extras=None, log_level=logging.INFO):
        if not utils.is_local():
            _initialize_cloud_clients()
            self.logger = _log_client.logger(name)
        else:
            self.logger = logging.getLogger(name)
        logging.getLogger(name).setLevel(log_level)
        logging.getLogger(name).addFilter(LengthFilter())
        self.default_extras = default_extras if default_extras else {}

    def error(self, *args, **kwargs):
        """Wrapper that uses _log_function_wrapper to call error."""
        self._log_function_wrapper(error, *args, **kwargs)

    def warning(self, *args, **kwargs):
        """Wrapper that uses _log_function_wrapper to call warning."""
        self._log_function_wrapper(warning, *args, **kwargs)

    def info(self, *args, **kwargs):
        """Wrapper that uses _log_function_wrapper to call info."""
        self._log_function_wrapper(info, *args, **kwargs)

    def debug(self, *args, **kwargs):
        """Wrapper that uses _log_function_wrapper to call debug."""
        self._log_function_wrapper(debug, *args, **kwargs)

    def _log_function_wrapper(self, log_function, message, *args, extras=None):
        """Wrapper around log functions that passes extras and the logger this
        object wraps (self.logger)."""
        extras = {} if extras is None else extras
        extras = extras.copy()
        extras.update(self.default_extras)
        log_function(message, *args, extras=extras, logger=self.logger)
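

# Illustrative only (not part of the original module): a per-component Logger
# merges its default extras into every call. The names and extra values below
# are hypothetical.
#
#   build_logger = Logger('build', default_extras={'component': 'builder'})
#   build_logger.info('Built benchmark %s.', 'libpng-1.2.56',
#                     extras={'fuzzer': 'afl'})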


class LogSeverity(Enum):
    """Enum for different levels of log severity."""
    ERROR = logging.ERROR
    WARNING = logging.WARNING
    INFO = logging.INFO
    DEBUG = logging.DEBUG


@retry.wrap(NUM_RETRIES, RETRY_DELAY, 'common.logs.log')
def log(logger, severity, message, *args, extras=None):
    """Log |message| with severity |severity|. If using stackdriver logging,
    then |extras| is also logged (in addition to default extras)."""
    message = str(message)
    if utils.is_local():
        if extras:
            message += ' Extras: ' + str(extras)
        logging.log(severity, message, *args)
        return

    if logger is None:
        logger = _default_logger
    assert logger

    if args:
        message = message % args

    struct_message = {
        'message': message,
    }
    all_extras = _default_extras.copy()
    extras = extras or {}
    all_extras.update(extras)
    struct_message.update(all_extras)
    severity = LogSeverity(severity).name
    logger.log_struct(struct_message, severity=severity)
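

# For reference (illustrative, not in the original file): the stdlib numeric
# level is mapped to the severity string expected by log_struct through the
# LogSeverity enum, e.g.
#
#   assert LogSeverity(logging.WARNING).name == 'WARNING'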


def error(message, *args, extras=None, logger=None):
    """Logs |message| to stackdriver logging and error reporting (including
    the exception, if there was one)."""

    @retry.wrap(NUM_RETRIES, RETRY_DELAY,
                'common.logs.error._report_error_with_retries')
    def _report_error_with_retries(message):
        if utils.is_local():
            return
        _error_reporting_client.report(message)

    if not any(sys.exc_info()):
        _report_error_with_retries(message % args)
        log(logger, logging.ERROR, message, *args, extras=extras)
        return

    # I can't figure out how to include both the message and the exception
    # other than having the exception message precede the log message
    # (without using private APIs).
    _report_error_with_retries(traceback.format_exc() + '\nMessage: ' +
                               message % args)

    extras = {} if extras is None else extras
    extras['traceback'] = traceback.format_exc()
    log(logger, logging.ERROR, message, *args, extras=extras)


def warning(message, *args, extras=None, logger=None):
    """Log a message with severity 'WARNING'."""
    log(logger, logging.WARNING, message, *args, extras=extras)


def info(message, *args, extras=None, logger=None):
    """Log a message with severity 'INFO'."""
    log(logger, logging.INFO, message, *args, extras=extras)


def debug(message, *args, extras=None, logger=None):
    """Log a message with severity 'DEBUG'."""
    log(logger, logging.DEBUG, message, *args, extras=extras)


class LengthFilter(logging.Filter):
    """Filter for truncating log messages that are too long for stackdriver."""

    def filter(self, record):
        if len(record.msg) > LOG_LENGTH_LIMIT:
            record.msg = ('TRUNCATED: ' + record.msg)[:LOG_LENGTH_LIMIT]
        return True
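

# Illustrative only (not part of the original module): messages longer than
# LOG_LENGTH_LIMIT are rewritten in place with a 'TRUNCATED: ' prefix before
# being emitted, e.g.
#
#   record = logging.LogRecord('demo', logging.INFO, __file__, 0,
#                              'A' * (LOG_LENGTH_LIMIT + 1), None, None)
#   LengthFilter().filter(record)
#   assert record.msg.startswith('TRUNCATED: ')
#   assert len(record.msg) == LOG_LENGTH_LIMIT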