Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_column_init_no_error_on_default_scale(self):
    """Constructing a column with a unit but a default (unset) scale must not raise."""
    # Same positional arguments as before, bundled and unpacked.
    init_args = ("memUsed", None, None, None, self.measure_type, "B")
    Column(*init_args)
def test_column_init_no_error_on_same_unit_without_scale(self):
    """Identical display and source units with no scale factor must construct cleanly."""
    # Same positional arguments as before, bundled and unpacked.
    init_args = ("memUsed", None, None, None, self.measure_type, "B", "B", None)
    Column(*init_args)
def setUp(self):
    """Build the fixtures shared by the tests in this class."""
    digits = 6
    figures = 4
    measure = ColumnMeasureType(digits)
    self.max_dec_digits = digits
    self.sig_figures = figures
    self.measure_type = measure
    # A fully specified measurement column used as the common test subject.
    self.measure_column = Column(
        "CpuTime", None, figures, None, measure, None, None, 1
    )
    self.default_optionals = (False, "html")
from benchexec.tablegenerator.columns import Column
import zipfile
# Process pool for parallel work.
# Some of our loops are CPU-bound (e.g., statistics calculations), thus we use
# processes, not threads.
# Fully initialized only in main() because we cannot do so in the worker processes.
parallel = util.DummyExecutor()
# Most important columns that should be shown first in tables (in the given order)
MAIN_COLUMNS = [
    Column("status"),
    Column("category"),
    Column("cputime"),
    Column("walltime"),
    # Memory values arrive in bytes ("B") and are displayed in megabytes ("MB").
    Column("memory", unit="MB", source_unit="B"),
    Column(
        "memUsage", display_title="memory", unit="MB", source_unit="B"
    ),  # if old results are given
    Column("cpuenergy"),
]
NAME_START = "results"  # first part of filename of table
DEFAULT_OUTPUT_PATH = "results/"
TEMPLATE_FORMATS = ["html", "csv"]
_BYTE_FACTOR = 1000  # bytes in a kilobyte
UNIT_CONVERSION = {
Extract all columns mentioned in the result tag of a table definition file.
"""
def handle_path(path):
    """Resolve *path* relative to the table-definition file.

    Empty/None paths and absolute URLs are returned unchanged; anything
    else is joined onto the directory of ``table_definition_file`` (a
    variable from the enclosing scope).
    """
    if not path:
        return path
    if path.startswith(("http://", "https://")):
        # Remote references must not be rewritten.
        return path
    return os.path.join(os.path.dirname(table_definition_file), path)
# NOTE(review): fragment of a larger function; `xmltag`, `Column`, and
# `handle_path` come from the enclosing (not visible) scope.
columns = []
for c in xmltag.findall("column"):
    # All attributes are optional; Element.get() yields None when absent.
    scale_factor = c.get("scaleFactor")
    display_unit = c.get("displayUnit")
    source_unit = c.get("sourceUnit")
    new_column = Column(
        c.get("title"),
        c.text,
        c.get("numberOfDigits"),
        handle_path(c.get("href")),  # hrefs are resolved relative to the definition file
        None,
        display_unit,
        source_unit,
        scale_factor,
        c.get("relevantForDiff"),
        c.get("displayTitle"),
    )
    columns.append(new_column)
return columns
# NOTE(review): fragment — the opening of this set comprehension and the
# enclosing function signature lie outside the visible region.
for s in run_results
for c in s.findall("column")
if all_columns or c.get("hidden") != "true"
}
if not column_names:
    # completely empty results break stuff, add at least status column
    return [MAIN_COLUMNS[0]]
# Put main columns first, then rest sorted alphabetically
custom_columns = column_names.difference(
    column.title for column in MAIN_COLUMNS
)
return [
    column for column in MAIN_COLUMNS if column.title in column_names
] + [Column(title) for title in sorted(custom_columns)]
import benchexec.result as result
import benchexec.util
from benchexec.tablegenerator import htmltable, statistics, util
from benchexec.tablegenerator.columns import Column
import zipfile
# Process pool for parallel work.
# Some of our loops are CPU-bound (e.g., statistics calculations), thus we use
# processes, not threads.
# Fully initialized only in main() because we cannot do so in the worker processes.
parallel = util.DummyExecutor()
# Most important columns that should be shown first in tables (in the given order)
MAIN_COLUMNS = [
    Column("status"),
    Column("category"),
    Column("cputime"),
    Column("walltime"),
    # Memory values arrive in bytes ("B") and are displayed in megabytes ("MB").
    Column("memory", unit="MB", source_unit="B"),
    Column(
        "memUsage", display_title="memory", unit="MB", source_unit="B"
    ),  # if old results are given
    Column("cpuenergy"),
]
NAME_START = "results"  # first part of filename of table
DEFAULT_OUTPUT_PATH = "results/"
TEMPLATE_FORMATS = ["html", "csv"]
import benchexec.util
from benchexec.tablegenerator import htmltable, statistics, util
from benchexec.tablegenerator.columns import Column
import zipfile
# Process pool for parallel work.
# Some of our loops are CPU-bound (e.g., statistics calculations), thus we use
# processes, not threads.
# Fully initialized only in main() because we cannot do so in the worker processes.
parallel = util.DummyExecutor()
# Most important columns that should be shown first in tables (in the given order)
MAIN_COLUMNS = [
    Column("status"),
    Column("category"),
    Column("cputime"),
    Column("walltime"),
    # Memory values arrive in bytes ("B") and are displayed in megabytes ("MB").
    Column("memory", unit="MB", source_unit="B"),
    Column(
        "memUsage", display_title="memory", unit="MB", source_unit="B"
    ),  # if old results are given
    Column("cpuenergy"),
]
NAME_START = "results"  # first part of filename of table
DEFAULT_OUTPUT_PATH = "results/"
TEMPLATE_FORMATS = ["html", "csv"]
_BYTE_FACTOR = 1000  # bytes in a kilobyte
# Some of our loops are CPU-bound (e.g., statistics calculations), thus we use
# processes, not threads.
# Fully initialized only in main() because we cannot do so in the worker processes.
parallel = util.DummyExecutor()
# Most important columns that should be shown first in tables (in the given order)
MAIN_COLUMNS = [
    Column("status"),
    Column("category"),
    Column("cputime"),
    Column("walltime"),
    # Memory values arrive in bytes ("B") and are displayed in megabytes ("MB").
    Column("memory", unit="MB", source_unit="B"),
    Column(
        "memUsage", display_title="memory", unit="MB", source_unit="B"
    ),  # if old results are given
    Column("cpuenergy"),
]
NAME_START = "results"  # first part of filename of table
DEFAULT_OUTPUT_PATH = "results/"
TEMPLATE_FORMATS = ["html", "csv"]
_BYTE_FACTOR = 1000  # bytes in a kilobyte
UNIT_CONVERSION = {
"s": {"ms": 1000, "min": 1.0 / 60, "h": 1.0 / 3600},
"B": {"kB": 1.0 / 10 ** 3, "MB": 1.0 / 10 ** 6, "GB": 1.0 / 10 ** 9},
"J": {
"kJ": 1.0 / 10 ** 3,
"Ws": 1,