# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_is_thread_local(self, D):
    """
    The context is *not* shared between threads.
    """
    class Worker(threading.Thread):
        def __init__(self, wrapped):
            threading.Thread.__init__(self)
            self._wrapped = wrapped

        def run(self):
            # A freshly started thread must see an empty context...
            assert "tl" not in self._wrapped._dict
            # ...and writes made here must stay private to this thread.
            self._wrapped["tl"] = 23

    ctx = wrap_dict(dict)()
    ctx["tl"] = 42
    worker = Worker(ctx)
    worker.start()
    worker.join()
    # The child thread's write must not have clobbered our value.
    assert 42 == ctx._dict["tl"]
# Set werkzeug logging level
# NOTE(review): `levels` and EQ_WERKZEUG_LOG_LEVEL come from elsewhere in the
# module -- presumably a name->level mapping and a config value; verify.
werkzeug_logger = logging.getLogger('werkzeug')
werkzeug_logger.setLevel(level=levels[EQ_WERKZEUG_LOG_LEVEL])
def parse_exception(_, __, event_dict):
    """structlog processor: make a rendered exception JSON-friendly.

    In developer mode the event is passed through untouched.  Otherwise any
    formatted exception text has double quotes swapped for single quotes and
    is split on newlines so it serialises as a list of lines.
    """
    if EQ_DEVELOPER_LOGGING:
        return event_dict
    trace = event_dict.get('exception')
    if not trace:
        return event_dict
    event_dict['exception'] = trace.replace("\"", "'").split("\n")
    return event_dict
# setup file logging
# Console renderer for human-readable developer output, JSON lines otherwise.
renderer_processor = ConsoleRenderer() if EQ_DEVELOPER_LOGGING else JSONRenderer()
# Processor chain: log level, ISO timestamp, service tag, exception
# formatting/parsing, then the renderer chosen above.
processors = [add_log_level, TimeStamper(key='created', fmt='iso'), add_service, format_exc_info, parse_exception, renderer_processor]
# Thread-local context dict; cache the wrapped logger after first use.
configure(context_class=wrap_dict(dict), logger_factory=LoggerFactory(), processors=processors, cache_logger_on_first_use=True)
def __init__(self, message, case_number):
    """Build a JSON log event and configure structlog for JSON rendering.

    NOTE(review): `case_number` is accepted but not used in this method --
    presumably consumed elsewhere (see generate_log_filename); verify.
    """
    self.message = message
    # Capture both an epoch timestamp and an ISO-8601 UTC datetime.
    self.timestamp = int(time.time())
    self.datetime = datetime.utcnow().isoformat()
    self.desc = "AWS_IR Action"
    # Pretty-printed, key-sorted JSON output over a thread-local context.
    configure(
        processors=[JSONRenderer(indent=1, sort_keys=True)],
        context_class=structlog.threadlocal.wrap_dict(dict),
        logger_factory=structlog.stdlib.LoggerFactory()
    )
    self.log = get_logger('aws_ir.json')
    payload = {
        'message': self.message,
        'timestamp': self.timestamp,
        'datetime': self.datetime,
        'desc': self.desc,
    }
    event = ReturnLogger().msg('message', **payload)
def generate_log_filename(case_number):
    """Return the path of the per-case log file under /tmp."""
    return "/tmp/{case_number}-aws_ir.log".format(case_number=case_number)
# NOTE(review): fragment -- the enclosing `def` starts outside this chunk,
# so the bare `return` below cannot be documented further from here.
some_function()
# ...
return "logged in!"
if __name__ == "__main__":
# Route plain-text log records to stdout at INFO level.
logging.basicConfig(
format="%(message)s", stream=sys.stdout, level=logging.INFO
)
structlog.configure(
processors=[
# Render events as key=value pairs with event and request_id first.
structlog.processors.KeyValueRenderer(
key_order=["event", "request_id"]
)
],
# Thread-local context: bound values stay private to each thread.
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
)
app.run()
import logging
import os
import sys
import time
from functools import wraps
from random import random
import structlog
# Format string for stdlib logging: emit the rendered message only.
LOG_FORMAT = "%(message)s"
DEFAULT_STREAM = sys.stdout
# Thread-local dict class used as structlog's context_class.
WRAPPED_DICT_CLASS = structlog.threadlocal.wrap_dict(dict)
# Environment variable names -- presumably used to pass request ids
# between API Gateway and Lambda layers; verify against callers.
ENV_APIG_REQUEST_ID = "_FLEECE_APIG_REQUEST_ID"
ENV_LAMBDA_REQUEST_ID = "_FLEECE_LAMBDA_REQUEST_ID"
def clobber_root_handlers():
    """Detach every handler currently attached to the root logger.

    Iterates over a copy of ``logging.root.handlers`` so removing entries
    while iterating is safe.  The original used a list comprehension purely
    for its side effects; a plain ``for`` loop states the intent and does
    not build a throwaway list of ``None``s.
    """
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
# NOTE(review): this class is truncated in this chunk -- the `else` branch
# body and the rest of the class are missing; the stray `)` below is
# concatenation residue from the extraction.
class logme(object):
"""Log requests and responses"""
def __init__(self, level=logging.DEBUG, logger=None):
# Level at which request/response messages will be logged.
self.level = level
# Default to the root logger when no logger is supplied.
if not logger:
self.logger = logging.getLogger()
else:
)
#
# def _log_exception(sender, exception, exc_info=None, **extra):
# app.struct_log.error(exc_info=exception)
#
#
# got_request_exception.connect(_log_exception, app)
if __name__ == "__main__":
app.logger.info(app.logger_name) # DO NOT REMOVE -- needed to init logger
# Apply the dict-based stdlib logging config before structlog setup.
logging.config.dictConfig(LOGGING_CONFIG)
structlog.configure(
processors=STRUCTLOG_PROCESSORS,
# Thread-local context so per-request bindings don't cross threads.
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
)
# Fresh bound logger stored on the app for request handlers to use.
app.struct_log = logger.new()
# NOTE(review): binds all interfaces with the reloader -- dev-only.
app.run(host="0.0.0.0", use_reloader=True)
# NOTE(review): the lines below are the trailing keyword arguments of a call
# (presumably rollbar.init) whose opening line is outside this chunk.
# server root directory, makes tracebacks prettier
root=os.path.dirname(os.path.realpath(__file__)),
# flask already sets up logging
allow_logging_basic_config=False)
# send exceptions from `app` to rollbar, using flask's signal system.
got_request_exception.connect(rollbar.contrib.flask.report_exception, app)
if __name__ == '__main__':
from structlog.stdlib import LoggerFactory
from structlog.threadlocal import wrap_dict
import logging
structlog.configure(
# Thread-local context class: bound values stay private to each thread.
context_class=wrap_dict(dict),
logger_factory=LoggerFactory(),
)
# Verbose stdlib logging -- suitable for local debugging only.
logging.basicConfig(level=logging.DEBUG)
app.run(debug=True)
}
structlog.configure(
processors=[
# Drop events below the stdlib logger's configured level first.
structlog.stdlib.filter_by_level,
structlog.processors.TimeStamper(fmt="iso"),
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.processors.UnicodeDecoder(),
structlog.processors.ExceptionPrettyPrinter(),
# Hand the event off to stdlib logging's formatter machinery last.
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
],
# Thread-local context; BoundLogger integrates with stdlib log levels.
context_class=structlog.threadlocal.wrap_dict(dict),
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
def configure():
"""Configure structlog to render through the stdlib logging machinery.

NOTE(review): the closing parenthesis of the conf(...) call is missing
from this chunk -- truncated by extraction; verify against the full file.
"""
conf(
# Thread-local context; stdlib logger factory and BoundLogger wrapper.
context_class=threadlocal.wrap_dict(dict),
logger_factory=stdlib.LoggerFactory(),
wrapper_class=stdlib.BoundLogger,
processors=[
stdlib.PositionalArgumentsFormatter(),
processors.TimeStamper(fmt="iso"),
processors.StackInfoRenderer(),
processors.format_exc_info,
processors.UnicodeDecoder(),
# Pass the final event dict to stdlib logging as keyword arguments.
stdlib.render_to_log_kwargs,
]
def logbook_factory(*args, **kwargs):
# Logger given to structlog
# Redirects stdlib logging into logbook each time a logger is created,
# then returns a logbook.Logger.  `level` is a free variable -- presumably
# defined in the enclosing scope; verify.
logbook.compat.redirect_logging()
return logbook.Logger(level=level, *args, **kwargs)
# Setup structlog over logbook, with args list at the end
processors = [
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
# Renderer defined elsewhere in the project -- presumably flattens the
# event dict to an unstructured string for logbook; verify.
UnstructuredRenderer(),
]
structlog.configure(
# Thread-local context; loggers are produced by logbook_factory above.
context_class=structlog.threadlocal.wrap_dict(dict),
processors=processors,
logger_factory=logbook_factory,
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)