# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def report():
    """Return memory usage [VmSize, VmRSS] of the current process.

    Values are parsed from the ``/proc/<pid>/status`` pseudo file and
    converted through the module-level ``scale`` unit table, then divided
    by 1024 (presumably yielding kilobytes -- depends on the units in
    ``scale``; TODO confirm against its definition).

    Returns:
        list of two ints on success ([VmSize, VmRSS]); a missing or
        malformed field contributes 0. Returns the tuple ``(0, 0)`` if
        the status file cannot be read at all (non-Linux, permissions).
    """
    # pseudo file /proc/<pid>/status
    proc_status = '/proc/%d/status' % os.getpid()
    try:
        # 'with' guarantees the handle is closed even if read() raises
        # (original opened/closed manually and leaked on read failure)
        with open(proc_status) as status_file:
            v = status_file.read()
    except Exception:  # narrowed from bare except:
        context.log.error('mem', exc_info=True)
        return 0, 0

    # get VmKey lines, e.g. 'VmRSS: 9999 kB\n'
    results = []
    for vm_key in ['VmSize:', 'VmRSS:']:
        i = v.find(vm_key)
        if i == -1:
            results.append(0)  # key absent from status file; was v.index -> uncaught ValueError
            continue
        parts = v[i:].split(None, 3)  # whitespace split: ['VmRSS:', '9999', 'kB', ...]
        if len(parts) < 3:
            results.append(0)  # invalid format
            # bugfix: original fell through here and indexed parts[2] -> IndexError
            continue
        # convert the Vm value to bytes via scale, then down by 1024
        results.append(int(float(parts[1]) * scale[parts[2]] / 1024))
    return results
self.listener_setup_attempts
)
)
self.listener_setup_attempts = 0 # reset attempt counter
except AmplifyAddresssAlreadyInUse as e:
if self.listener_setup_attempts < 3:
context.log.warning(
'failed to start listener during "SyslogTail.__iter__()" due to "%s", '
'will try again (attempts: %s)' % (
e.__class__.__name__,
self.listener_setup_attempts
)
)
context.log.debug('additional info:', exc_info=True)
else:
context.log.error(
'failed to start listener %s times, will not try again' % self.listener_setup_attempts
)
context.log.debug('additional info:', exc_info=True)
current_cache = copy.deepcopy(self.cache)
context.log.debug('syslog tail returned %s lines captured from %s' % (len(current_cache), self.name))
self.cache.clear()
return iter(current_cache)
backpressure_error = HTTP503Error(e)
context.backpressure_time = int(time.time() + backpressure_error.delay)
context.log.debug(
'back pressure delay %s added (next talk: %s)' % (
backpressure_error.delay,
context.backpressure_time
)
)
else:
self.cloud_talk_fails += 1
self.cloud_talk_delay = exponential_delay(self.cloud_talk_fails)
context.log.debug(
'cloud talk delay set to %s (fails: %s)' % (self.cloud_talk_delay, self.cloud_talk_fails)
)
context.log.error('could not connect to cloud', exc_info=True)
raise AmplifyCriticalException()
# check agent version status
if context.version_semver <= cloud_response.versions.obsolete:
context.log.error(
'agent is obsolete - cloud will refuse updates until it is updated (version: %s, current: %s)' %
(tuple_to_version(context.version_semver), tuple_to_version(cloud_response.versions.current))
)
self.stop()
elif context.version_semver <= cloud_response.versions.old:
context.log.warn(
'agent is old - update is recommended (version: %s, current: %s)' %
(tuple_to_version(context.version_semver), tuple_to_version(cloud_response.versions.current))
)
# set capabilities
else:
r = self.session.post(
url,
data=payload,
timeout=timeout,
verify=self.verify_ssl_cert,
proxies=self.proxies
)
http_code = r.status_code
r.raise_for_status()
result = r.json() if json else r.text
request_id = r.headers.get('X-Amplify-ID', None)
return result
except Exception as e:
if log:
context.log.error('failed %s "%s", exception: "%s"' % (method.upper(), url, e.message))
context.log.debug('', exc_info=True)
raise e
finally:
end_time = time.time()
log_method = context.log.info if log else context.log.debug
context.log.debug(result)
log_method(
'[%s] %s %s %s %s %s %.3f' % (
request_id,
method,
url,
http_code,
len(payload),
len(result),
end_time - start_time
)
max children reached: 0
slow requests: 0
"""
try:
with gevent.Timeout(10, TimeoutException):
fcgi = self._connect()
resp = fcgi(self.env, lambda x, y: None)
except TimeoutException:
context.log.error(
'pool communication at "%s" timed out' %
self.connection.__str__()
) # use .__str__() because of namedtuple
context.log.debug('additional info:', exc_info=True)
resp = ('500', [], '', '')
except:
context.log.error(
'failed to communicate with pool at "%s"' %
self.connection.__str__()
) # use .__str__() because of namedtuple
context.log.debug('additional info:', exc_info=True)
resp = ('500', [], '', '')
status, headers, out, err = resp
if status.startswith('200'):
return out
else:
context.log.debug(
'non-success returned by fcgi (status: %s)' % status
)
context.log.debug(
'additional info:\n'
def handle_exception(self, method, exception):
    """Log a collector failure: which collector, which method, what was raised."""
    container_suffix = ' (in container)' if self.in_container else ''
    message = '%s failed to collect: %s raised %s%s' % (
        self.short_name,
        method.__name__,
        exception.__class__.__name__,
        container_suffix,
    )
    context.log.error(message)
    context.log.debug('additional info:', exc_info=True)
# if not parsed - go to the next line
if parsed is None:
continue
pid, ppid, cmd = parsed # unpack values
# match master process
if 'master process' in cmd:
if not launch_method_supported("php-fpm", ppid):
continue
try:
conf_path = MASTER_PARSER(cmd)
except:
context.log.error(
'failed to find conf_path for %s' % cmd
)
context.log.debug('additional info:', exc_info=True)
else:
# calculate local_id
local_id = hashlib.sha256('%s_%s' % (cmd, conf_path)).hexdigest()
if pid not in masters:
masters[pid] = {'workers': []}
masters[pid].update({
'cmd': cmd.strip(),
'conf_path': conf_path,
'pid': pid,
'local_id': local_id
})
try:
cmd = "/usr/sbin/mysqld"
conf_path = "/etc/mysql/my.cnf"
# calculate local_id
local_id = hashlib.sha256('%s_%s' % (cmd, conf_path)).hexdigest()
results.append({
'cmd': 'unknown',
'conf_path': 'unknown',
'pid': 'unknown',
'local_id': local_id
})
except Exception as e:
# log error
exception_name = e.__class__.__name__
context.log.error('failed to parse remote mysql results due to %s' % exception_name)
context.log.debug('additional info:', exc_info=True)
return results
context.log.error('failed to check stub_status url %s' % self.object.stub_status_url)
context.log.debug('additional info', exc_info=True)
stub_body = None
if not stub_body:
return
# parse body
try:
gre = STUB_RE.match(stub_body)
if not gre:
raise AmplifyParseException(message='stub status %s' % stub_body)
for field in ('connections', 'accepts', 'handled', 'requests', 'reading', 'writing', 'waiting'):
stub[field] = int(gre.group(field))
except:
context.log.error('failed to parse stub_status body')
raise
# store some variables for further use
stub['dropped'] = stub['accepts'] - stub['handled']
# gauges
self.object.statsd.gauge('nginx.http.conn.current', stub['connections'])
self.object.statsd.gauge('nginx.http.conn.active', stub['connections'] - stub['waiting'])
self.object.statsd.gauge('nginx.http.conn.idle', stub['waiting'])
self.object.statsd.gauge('nginx.http.request.writing', stub['writing'])
self.object.statsd.gauge('nginx.http.request.reading', stub['reading'])
self.object.statsd.gauge('nginx.http.request.current', stub['reading'] + stub['writing'])
# counters
counted_vars = {
'nginx.http.request.count': 'requests',
'uname': None,
'network': {
'interfaces': [],
'default': None
}
})
for method in (
self.uname,
self.network,
):
try:
method(meta)
except Exception as e:
exception_name = e.__class__.__name__
context.log.error('failed to collect meta %s due to %s' % (method.__name__, exception_name))
context.log.debug('additional info:', exc_info=True)
self.object.metad.meta(meta)