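# Each callstack record is "<samples> <symbol id> <symbol id> ...", leaf first:
# the leaf symbol receives the self samples, and every adjacent caller/callee
# pair on the stack has the samples accumulated on its call arc.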
fields = line.split()
samples = float(fields[0])
callstack = fields[1:]
callstack = [self.symbols[symbol_id] for symbol_id in callstack]
callee = callstack[0]
callee[SAMPLES] += samples
self.profile[SAMPLES] += samples
for caller in callstack[1:]:
try:
call = caller.calls[callee.id]
except KeyError:
call = Call(callee.id)
call[SAMPLES2] = samples
caller.add_call(call)
else:
call[SAMPLES2] += samples
callee = caller
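# A pstats input file is typically produced with the standard profiler, e.g.
#   python -m cProfile -o output.pstats path/to/script args
# and then rendered with
#   gprof2dot -f pstats output.pstats | dot -Tpng -o output.png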
def __init__(self, *filename):
import pstats
try:
self.stats = pstats.Stats(*filename)
except ValueError:
if PYTHON_3:
sys.stderr.write('error: failed to load %s\n' % ', '.join(filename))
sys.exit(1)
import hotshot.stats
self.stats = hotshot.stats.load(filename[0])
self.profile = Profile()
self.function_ids = {}
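# Profile functions are keyed by "<process>!<module>!<name>": reuse the
# existing Function for that key, or create and register a new one with
# zero samples on first sight.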
function_id = process + '!' + symbol
try:
function = self.profile.functions[function_id]
except KeyError:
module, name = symbol.split('!', 1)
function = Function(function_id, name)
function.process = process
function.module = module
function[SAMPLES] = 0
self.profile.add_function(function)
return function
class SleepyParser(Parser):
"""Parser for GNU gprof output.
See also:
- http://www.codersnotes.com/sleepy/
- http://sleepygraph.sourceforge.net/
"""
stdinInput = False
def __init__(self, filename):
Parser.__init__(self)
from zipfile import ZipFile
self.database = ZipFile(filename)
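# A .sleepy capture is just a zip archive. The helper below is an illustrative
# sketch, not part of the original module: the member name 'Callstacks.txt' and
# the leaf-first "<samples> <id> <id> ..." record layout are assumptions about
# the Very Sleepy capture format.
def _sleepy_callstack_records(path):
    """Yield (samples, symbol_ids) tuples from a .sleepy capture (sketch)."""
    from zipfile import ZipFile
    with ZipFile(path) as db:
        for raw in db.open('Callstacks.txt'):
            fields = raw.decode('utf-8').split()
            if fields:
                yield float(fields[0]), fields[1:]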
# - In the absence of call counts, call ratios are faked from the relative
# proportions of total time. This affects only the weighting of the calls.
# - Different header, separator, and end marker.
# - Extra whitespace after function names.
# - You get a full entry for <spontaneous>, which does not have parents.
# - Cycles do have parents. These are saved but unused (as they are
# for functions).
# - Disambiguated "unrecognized call graph entry" error messages.
# Notes:
# - Total time of functions as reported by AXE passes the val3 test.
# - CPU Time:Children in the input is sometimes a negative number. This
# value goes to the variable descendants, which is unused.
# - The format of gprof-cc reports is unaffected by the use of
# -knob enable-call-counts=true (no call counts, ever), or
# -show-as=samples (results are quoted in seconds regardless).
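# An illustrative sketch of the "faked" call weighting mentioned above: with no
# call counts available, each call arc out of a caller is weighted by its
# callee's share of the callees' summed total time. The helper name and its
# input shape are assumptions for illustration, not the parser's own code.
def _fake_call_ratios(callee_total_times):
    """Map callee -> total time into callee -> weight in [0, 1] (sketch)."""
    total = sum(callee_total_times.values())
    if total <= 0.0:
        return dict.fromkeys(callee_total_times, 0.0)
    return {callee: t / total for callee, t in callee_total_times.items()}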
class AXEParser(Parser):
"Parser for VTune Amplifier XE 2013 gprof-cc report output."
def __init__(self, fp):
Parser.__init__(self)
self.fp = fp
self.functions = {}
self.cycles = {}
def readline(self):
line = self.fp.readline()
if not line:
sys.stderr.write('error: unexpected end of file\n')
sys.exit(1)
line = line.rstrip('\r\n')
return line
call[TOTAL_TIME] = ratio(value, nc)*ct
caller.add_call(call)
if False:
self.stats.print_stats()
self.stats.print_callees()
# Compute derived events
self.profile.validate()
self.profile.ratio(TIME_RATIO, TIME)
self.profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
return self.profile
class DtraceParser(LineParser):
"""Parser for linux perf callgraph output.
It expects output generated with
# Refer to https://github.com/brendangregg/FlameGraph#dtrace
# 60 seconds of user-level stacks, including time spent in-kernel, for PID 12345 at 97 Hertz
sudo dtrace -x ustackframes=100 -n 'profile-97 /pid == 12345/ { @[ustack()] = count(); } tick-60s { exit(0); }' -o out.user_stacks
# The dtrace output
gprof2dot.py -f dtrace out.user_stacks
# Note: the dtrace output is sometimes encoded in latin-1, and gprof2dot will fail to parse it.
# To work around this, use iconv to convert it to UTF-8 explicitly.
# TODO: add an encoding flag to tell gprof2dot how to decode the profile file.
iconv -f ISO-8859-1 -t UTF-8 out.user_stacks | gprof2dot.py -f dtrace
"""
parser = Format(args[0])
profile = parser.parse()
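# On Python 3 the output stream is (re)opened with an explicit UTF-8 encoding,
# so non-ASCII symbol names survive both stdout and file output.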
if options.output is None:
if PYTHON_3:
output = open(sys.stdout.fileno(), mode='wt', encoding='UTF-8', closefd=False)
else:
output = sys.stdout
else:
if PYTHON_3:
output = open(options.output, 'wt', encoding='UTF-8')
else:
output = open(options.output, 'wt')
dot = DotWriter(output)
dot.strip = options.strip
dot.wrap = options.wrap
labelNames = options.node_labels or defaultLabelNames
dot.show_function_events = [labels[l] for l in labelNames]
if options.show_samples:
dot.show_function_events.append(SAMPLES)
profile.prune(options.node_thres/100.0, options.edge_thres/100.0, options.filter_paths, options.color_nodes_by_selftime)
if options.root:
rootIds = profile.getFunctionIds(options.root)
if not rootIds:
sys.stderr.write('root node ' + options.root + ' not found (might already be pruned: try the -e0 -n0 flags)\n')
sys.exit(1)
def parse_subentry(self):
entry = Struct()
line = self.consume()
mo = self.entry_re.match(line)
if not mo:
raise ParseError('failed to parse', line)
fields = mo.groupdict()
entry.samples = int(mo.group(1))
if 'source' in fields and fields['source'] != '(no location information)':
source = fields['source']
filename, lineno = source.split(':')
entry.filename = filename
entry.lineno = int(lineno)
else:
source = ''
entry.filename = None
entry.lineno = None
entry.image = fields.get('image', '')
entry.application = fields.get('application', '')
if 'symbol' in fields and fields['symbol'] != '(no symbols)':
entry.symbol = fields['symbol']
else:
function_name, file, line = match.groups()
trace += [(function_name, file, line)]
self.traces[int(tid)] = trace
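# The samples section is a table of "rank self accum count trace method" rows
# (as in hprof's CPU SAMPLES output), terminated by the closing line that
# starts with 'CPU'.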
def parse_samples(self):
self.consume()
self.consume()
while not self.lookahead().startswith('CPU'):
rank, percent_self, percent_accum, count, traceid, method = self.lookahead().split()
self.samples[int(traceid)] = (int(count), method)
self.consume()
class SysprofParser(XmlParser):
def __init__(self, stream):
XmlParser.__init__(self, stream)
def parse(self):
objects = {}
nodes = {}
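# A sysprof XML capture carries two item lists under <profile>: "objects"
# (the sampled functions) and "nodes" (the call-tree entries that refer to them).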
self.element_start('profile')
while self.token.type == XML_ELEMENT_START:
if self.token.name_or_data == 'objects':
assert not objects
objects = self.parse_items('objects')
elif self.token.name_or_data == 'nodes':
assert not nodes
nodes = self.parse_items('nodes')